repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes) |
---|---|---|---|---|---|---|---|---|---|---|
boweniac/autogan | autogan/agents/universal_agent.py | [
{
"identifier": "AgentSwitch",
"path": "autogan/agents/agent_switch.py",
"snippet": "class AgentSwitch:\n def __init__(\n self,\n organizational_structure: List,\n task_tag: Optional[str] = \"/task\",\n opening_speaker: Optional[any] = None,\n default_agent_config: Optional[Dict] = None,\n default_super_rich: Optional[str] = None,\n default_stream_mode: Optional[bool] = None,\n response_func: Optional[ResponseFuncType]\n = default_response_func,\n ):\n \"\"\"All messages sent by agents need to be forwarded through the AgentSwitch object.\n 所有 agent 发送的消息,都需要通过 AgentSwitch 对象进行转发。\n\n **Forwarding:**\n 转发:\n\n The AgentSwitch object determines who to forward the message to based on the agent name after the @ symbol in the message.\n AgentSwitch 对象通过消息中 @ 符号后的 agent name 来判断将消息转发给谁。\n\n **Conversation domain:**\n 会话域:\n\n In each round of dialogue, the agent does not need to use all historical conversation records as its context.\n 每轮对话 agent 无需将所有的历史会话记录作为其上下文。\n\n The agent's conversation domain is based on the task. that is, the context of each round of dialogue for the agent only focuses on the historical conversation records of the current task.\n agent 的会话域以任务为基础。即 agent 每轮对话的上下文仅聚焦于当前任务的历史会话记录。\n\n **Task:**\n 任务:\n\n The AgentSwitch object determines whether the content of the message is a task through the task tag in the message.\n AgentSwitch 对象通过消息中的 task tag,来判断消息的内容是否是一个任务。\n\n If it is a task, the AgentSwitch object will call the receiver's new_task method.\n 如果是任务,AgentSwitch 对象会调用接收方的 new_task 方法。\n\n The default task tag is /task, which can be modified through the task_tag parameter when initializing the AgentSwitch object.\n task tag 默认为 /task,该值可在初始化 AgentSwitch 对象时,通过 task_tag 参数修改。\n\n **Organizational structure:**\n 组织架构:\n\n A multidimensional list containing agent objects.\n 一个包含 agent 对象的多维列表。\n\n Each list is equivalent to a department, and the first agent in the list is the leader of the department.\n 每个列表相当于一个部门,列表中的第一个 agent 为部门的 leader。\n\n Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.\n 每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。\n\n Note: There cannot be agents with the same name in the organizational structure.\n 注意:组织架构中不能有相同名称的 agent。\n\n :param organizational_structure: A multidimensional list containing agent objects.\n 一个包含 agent 对象的多维列表。\n :param opening_speaker_name: The name of the human agent invited to publish the first task.\n 被邀请发布第一个任务的人工 agent 名称。\n :param task_tag: Publish tasks to other agents by adding task_tag to the message.\n 通过在消息中添加 task_tag 来向其他 agent 发布任务。\n \"\"\"\n self.task_tag = task_tag\n self._default_agent_config = default_agent_config\n self._default_super_rich = default_super_rich\n self._default_stream_mode = default_stream_mode\n self._response_func = response_func\n self._agents = {} # key: agent name value: agent object\n\n self._init_agents(organizational_structure)\n self._init_agents_workmates(organizational_structure)\n if opening_speaker:\n self._inviting_to_speak(opening_speaker)\n\n def _init_agents(self, agent_list: list):\n for item in agent_list:\n if isinstance(item, list):\n self._init_agents(item)\n elif isinstance(item, str):\n continue\n else:\n self._agents[item.name] = item\n if item.agent_config is None and self._default_agent_config is not None:\n item.set_agent_config(self._default_agent_config)\n if item.super_rich is None and self._default_super_rich is not None:\n item.super_rich = self._default_super_rich\n if item.stream_mode is None:\n if self._default_stream_mode 
is None or self._default_stream_mode:\n item.stream_mode = True\n else:\n item.stream_mode = False\n if self._response_func:\n item.response_func = self._response_func\n\n def _init_agents_workmates(self, agent_list: list):\n \"\"\"Arrange for each agent to communicate with other agents according to the organizational structure.\n 根据组织架构,为每个 agent 安排可以与其沟通的其他 agent\n\n An agent should not exist in multiple departments.\n agent 不应存在于多个部门中\n\n :param agent_list: Organizational structure\n 组织架构\n \"\"\"\n if isinstance(agent_list[0], str):\n # The current list is workflow mode\n l = len(agent_list)\n\n for index, main_agent in enumerate(agent_list):\n # Skip the first element\n if index == 0:\n continue\n\n workmates = \"\"\n\n if index == l - 1:\n # If this is the last element\n name = \"\\\\\"\n elif isinstance(agent_list[index + 1], list):\n # If the next element is a list\n name = agent_list[index + 1][0].name\n duty = agent_list[index + 1][0].duty\n workmates = f\"\"\"\n{name} : {duty}\"\"\"\n else:\n # If the next element is agent\n name = agent_list[index + 1].name\n duty = agent_list[index + 1].duty\n workmates = f\"\"\"\n{name} : {duty}\"\"\"\n\n if isinstance(main_agent, list):\n # If the current element is a list\n self._init_agents_workmates(main_agent)\n if not main_agent[0].pipeline or main_agent[0].pipeline == \"\\\\\":\n main_agent[0].workmates += workmates\n main_agent[0].pipeline = name\n else:\n # If the current element is agent\n if not main_agent.pipeline or main_agent.pipeline == \"\\\\\":\n main_agent.workmates += workmates\n main_agent.pipeline = name\n else:\n # The current list is non-workflow mode.\n for main_agent in agent_list:\n workmates = \"\"\n\n if isinstance(main_agent, list):\n # If the current element is a list\n self._init_agents_workmates(main_agent)\n\n # If the current element is a workflow list, no hierarchical relationship is established.\n if isinstance(main_agent[0], str):\n continue\n\n # Establish a leveling relationship between current department leaders\n for agent in agent_list:\n if isinstance(agent, list):\n # If other elements are lists\n\n if isinstance(agent[0], str):\n if agent[0] == \"F\":\n # If it is a workflow\n\n # Determine whether the second element is a list.\n if isinstance(agent[1], list):\n name = agent[1][0].name\n duty = agent[1][0].duty\n else:\n name = agent[1].name\n duty = agent[1].duty\n else:\n # Skip other types of workflow\n continue\n else:\n # If it is a department\n if agent[0].name != main_agent[0].name and agent[0].duty is not None:\n name = agent[0].name\n duty = agent[0].duty\n else:\n # Skip departments that duplicate the current department\n continue\n else:\n # If other elements are agent\n name = agent.name\n duty = agent.duty\n workmates += f\"\"\"\n{name} : {duty}\"\"\"\n main_agent[0].workmates += workmates\n else:\n # If the current element is agent\n\n # Establish a level relationship of the current agent\n for agent in agent_list:\n if isinstance(agent, list):\n # If other elements are lists\n\n # Determine whether it is a department or a workflow\n if isinstance(agent[0], str):\n if agent[0] == \"F\":\n # If it is a workflow\n\n # Determine whether the second element is a list.\n if isinstance(agent[1], list):\n name = agent[1][0].name\n duty = agent[1][0].duty\n else:\n name = agent[1].name\n duty = agent[1].duty\n else:\n # Skip other types of workflow\n continue\n else:\n # If it is a department\n name = agent[0].name\n duty = agent[0].duty\n else:\n # If other elements are agent\n if agent.name 
!= main_agent.name and agent.duty is not None:\n name = agent.name\n duty = agent.duty\n else:\n # Skip the duplicate agent with the current agent\n continue\n workmates += f\"\"\"\n{name} : {duty}\"\"\"\n main_agent.workmates += workmates\n\n def _inviting_to_speak(self, invited_speaker):\n \"\"\"Invite the human agent to publish the first task\n 邀请人工 agent 发布第一个任务\n\n :param invited_speaker_name: The name of the human agent\n 人工 agent 名称。\n \"\"\"\n if invited_speaker.name not in self._agents:\n print(\"agent does not exist\")\n return\n new_task_id = self.create_time_based_uuid()\n invited_speaker.receive(self, new_task_id, \"system\", \"Please enter\", 2)\n\n def handle_and_forward(self, task_id: str, pusher_name: str, content: str,\n completion_tokens: Optional[int]):\n \"\"\"Handle messages and forward to other agent.\n 处理消息并转发给其他代理\n\n **Forwarding:**\n 转发:\n Determines who to forward the message to based on the agent name after the @ symbol in the message.\n 通过消息中 @ 符号后的 agent name 来判断将消息转发给谁。\n\n **Task:**\n 任务:\n Determines whether the content of the message is a task through the task tag in the message.\n 通过消息中的 task tag,来判断消息的内容是否是一个任务。\n\n If it is a task, will call the receiver's new_task method.\n 如果是任务,对象会调用接收方的 new_task 方法。\n\n **Conversation domain control:**\n 会话域控制:\n Translate the task id of the pusher into the task id of the receiver to connect the context.\n 将推送方的任务 id,转换为接收方的任务 id,以衔接上下文。\n\n - If the pusher is the task publisher, it is necessary to convert the task id of the pusher into the sub-task id of the receiver.\n - 如推送方为任务发布者,则需要将推送方的任务 id 转换为接收方的子任务 id。\n\n - If the pusher is executing the task published by the receiver, it is necessary to convert the task id of the pusher into the parent task id of the receiver.\n - 如推送方正在执行接收方发布的任务,则需要将推送方的任务 id 转换为接收方的上级任务 id。\n\n :param task_id: pusher task id.\n :param pusher_name: pusher_name.\n :param content: message content.\n :param completion_tokens: message content tokens.\n \"\"\"\n # Get pusher object.\n pusher = self._agents[pusher_name]\n\n # Recognize the recipient's name.\n match = re.findall(r'@(\\w+)', content)\n\n if match:\n if match[0] not in self._agents:\n # Handling the case of incorrect recipient name.\n warn = f\"@{pusher_name} {match[0]} not exist, do not @{match[0]} again, Also please do not attempt to converse with me, this is just a system message.\"\n self._response_func(\"system\", \"system\", \"\", False, 0, warn, 0, None)\n pusher.receive(self, task_id, \"system\", warn, 12)\n\n # Get receiver object.\n receiver = self._agents[match[0]]\n if re.search(fr'@\\w+ {self.task_tag}', content):\n # Generate a new task id.\n new_task_id = self.create_time_based_uuid()\n\n # Establish a relationship between the push task and the receiver task.\n pusher.sub_to_main_task_id[new_task_id] = task_id\n receiver.main_to_sub_task_id[task_id] = new_task_id\n # Create a new task.\n receiver.new_task(self, new_task_id, pusher_name, content, completion_tokens)\n else:\n switch_task_id = task_id\n if receiver.main_to_sub_task_id and task_id in receiver.main_to_sub_task_id:\n # Translate the session ID of the pusher into the sub-session ID of the receiver.\n switch_task_id = receiver.main_to_sub_task_id[task_id]\n if receiver.main_to_sub_task_id and task_id in receiver.sub_to_main_task_id:\n # Translate the session id of the sender into the superior session id of the receiver.\n switch_task_id = receiver.sub_to_main_task_id[task_id]\n if switch_task_id == task_id:\n # If no subtasks of the task from the pusher 
are found, a prompt is needed to create the task first.\n # Generate a new task id.\n new_task_id = self.create_time_based_uuid()\n\n # Establish a relationship between the push task and the receiver task.\n pusher.sub_to_main_task_id[new_task_id] = task_id\n receiver.main_to_sub_task_id[task_id] = new_task_id\n # Create a new task.\n content = content.replace(f\"@{match[0]} \", f\"@{match[0]} {self.task_tag} \")\n receiver.new_task(self, new_task_id, pusher_name, content, completion_tokens)\n else:\n receiver.receive(self, switch_task_id, pusher_name, content, completion_tokens)\n else:\n # Handling the situation where the recipient is not recognized.\n if pusher.pipeline != \"\\\\\":\n warn = f\"@{pusher_name} Any reply must start with @ + recipient's name, Also please do not attempt to converse with me, this is just a system message.\"\n self._response_func(\"system\", \"system\", \"\", False, 0, warn, 0, None)\n pusher.receive(self, task_id, \"system\", warn, 12)\n\n @staticmethod\n def create_time_based_uuid():\n # 获取当前时间的时间戳\n timestamp = time.time()\n\n # 创建一个基于时间戳的UUID\n return uuid.uuid5(uuid.NAMESPACE_DNS, str(timestamp))"
},
{
"identifier": "compressed_messages",
"path": "autogan/utils/compressed_messages_utils.py",
"snippet": "def compressed_messages(messages: List[Dict], focus: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n safe_size: Optional[int] = 4096) -> tuple[Optional[list], Optional[list], Optional[int]]:\n \"\"\"Compress Conversation Context\n 压缩会话上下文\n\n The content to be compressed is divided into: recent original conversation content, and distant content that needs to be compressed.\n 待压缩的会话内容会被分为:近期的原始会话内容、远期需要压缩的会话内容。\n\n When compressing distant conversation records, attention is focused on the 'focus'\n 在压缩远期会话记录时,会将注意力集中于 focus\n\n **Recent Original Conversation Content:**\n 近期原始会话内容:\n\n First, traverse the 'messages' in reverse order, extract the recent conversation records, until the cumulative tokens of the conversation records exceed 50% of the 'safe_size'\n 先反向遍历 messages,提取近期的会话记录,直至会话记录的累计 tokens 超过 safe_size 的 50%\n\n If the tokens of the first recent conversation record exceed 50% of the 'safe_size', then directly extract the first recent conversation record\n 如近期第一条会话记录的 tokens 就超过了 safe_size 的 50% 则直接提取近期第一条会话记录\n\n **Distant Compressed Conversation Content:**\n 远期压缩会话内容:\n\n The remaining conversation records will be compressed as distant conversation records. The size after compression is expected to be within the range of ('safe_size' - cumulative original conversation tokens)\n 剩余的会话记录将作为远期会话记录进行压缩,压缩后的大小被期望保持在 (safe_size - 累计原始会话 tokens) 范围之内\n\n If the value of 'safe_size' - cumulative original conversation tokens is less than 0, then the size after compression is expected to be 1024 tokens\n 如 safe_size - 累计原始会话 tokens 的值小于 0 则压缩后的大小被期望保持在 1024 tokens\n\n Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.\n 注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。\n\n :param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. It should include 'role', 'content', 'tokens' fields.\n 待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。\n :param focus: The focus direction when compressing distant conversation records\n 压缩远期会话记录时的专注方向\n :param summary_model_config: The LLM model configuration used to compress distant conversation records\n 用于压缩远期会话记录的 LLM 模型配置\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. 
When 'safe_size' is less than 0, it will be forcibly defined as 1024\n agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024\n\n :return:\n --conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained\n 压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段\n --request_messages: The message content requested to 'llm', removed the 'tokens' field of each message\n 用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段\n --total_tokens: The total tokens after compression\n 压缩后的整体tokens\n \"\"\"\n conversation_messages = []\n request_messages = []\n total_tokens = 0\n\n if len(messages) == 0:\n return None, None, None\n\n if safe_size < 0:\n safe_size = 1024\n # Reverse traverse the message to extract recent original conversation content.\n i = 0\n for message in reversed(messages):\n tokens = message[\"tokens\"]\n if total_tokens + tokens > int(safe_size * 0.5) and i != 0:\n break\n message_copy = message.copy()\n message_copy.pop('tokens', None)\n conversation_messages.insert(0, message)\n request_messages.insert(0, message_copy)\n total_tokens += tokens\n i -= 1\n # Compress the remaining messages as distant conversation records.\n if len(messages) > (i * -1):\n compressed_size = safe_size - total_tokens\n if compressed_size <= 0:\n compressed_size = 1024\n\n # 压缩剩余 messages\n content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)\n\n if content:\n conversation_messages.insert(\n 0,\n {'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',\n 'tokens': tokens}\n )\n request_messages.insert(\n 0,\n {'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}\n )\n total_tokens += tokens\n if conversation_messages and request_messages:\n return conversation_messages, request_messages, total_tokens\n else:\n return None, None, None"
},
{
"identifier": "compressed_text_universal",
"path": "autogan/utils/compressed_text_utils.py",
"snippet": "def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n focus: Optional[str] = None, safe_size: Optional[int] = None) \\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Compress the text, generating either a regular summary or a cue summary.\n 压缩文本,可生成普通摘要或线索摘要。\n\n First, the long text is sliced, and then a summary is generated for each slice.\n 首先将长文本切片,然后逐切片的生成摘要。\n\n If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary.\n 如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。\n\n If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.\n 如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。\n\n :param text: Text to be compressed.\n 待压缩的文本。\n :param summary_model_config: LLM configuration used for text compression.\n 用于压缩文本的 LLM 配置。\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param focus: The focus direction when compressing text.\n 压缩文本时的专注方向。\n :param safe_size: The target size of the text after compression, if not provided there is no limit.\n 文本压缩后的目标尺寸,如果为空则不做限制。\n\n :return:\n --compressed_text: The text after compression.\n 压缩后的文本。\n --total_tokens: Total tokens after compression.\n 压缩后的整体tokens。\n \"\"\"\n\n compressed_text = \"\"\n total_tokens = 0\n\n split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)\n\n for st in split_texts:\n if focus:\n content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func,\n stream_mode)\n else:\n content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode)\n\n if content:\n compressed_text += content + \"\\n\"\n total_tokens += tokens\n\n if compressed_text:\n if safe_size and safe_size < total_tokens:\n return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name,\n response_func, stream_mode)\n else:\n return compressed_text, total_tokens\n else:\n return None, None"
},
{
"identifier": "AgentConfig",
"path": "autogan/oai/config_utils.py",
"snippet": "class AgentConfig:\n \"\"\"The agent configuration includes:\n agent 配置包括:\n\n - main_model: The LLM configuration of the agent's main body.\n agent 主体的 LLM 配置。\n\n - summary_model: The LLM configuration used for compressing context and generating text summaries.\n 用于压缩上下文以及生成文本摘要的 LLM 配置。\n\n - request_interval_time: The interval time of LLM requests.\n LLM 请求间隔时间。\n\n - request_timeout:The timeout of LLM requests.\n LLM 请求超时时间。\n\n - max_retries: The maximum number of retries for LLM requests.\n LLM 请求最大重试次数。\n \"\"\"\n\n def __init__(\n self,\n config: Dict,\n ):\n model_filter = config[\"main_model\"].get(\"model_filter\", \"\")\n # main model config\n self._main_model_api_key_list = ConfigList(config[\"main_model\"][\"api_key_list\"], model_filter)\n self._main_model_max_messages_tokens = config[\"main_model\"][\"max_messages_tokens\"]\n\n # summary model config\n if \"summary_model\" in config:\n model_filter = config[\"summary_model\"].get(\"model_filter\", \"\")\n self._summary_model_api_key_list = ConfigList(config[\"summary_model\"][\"api_key_list\"], model_filter)\n self._summary_model_max_messages_tokens = config[\"summary_model\"][\"max_messages_tokens\"]\n else:\n # Use the main_model configuration when the summary_model configuration is empty.\n self._summary_model_api_key_list = self._main_model_api_key_list\n self._summary_model_max_messages_tokens = self._main_model_max_messages_tokens\n\n self._request_interval_time = config[\"request_interval_time\"]\n self._request_timeout = config[\"request_timeout\"]\n self._max_retries = config[\"max_retries\"]\n\n @property\n def main_model_config(self):\n return LLMConfig(\n self._main_model_api_key_list,\n self._main_model_max_messages_tokens,\n self._request_interval_time,\n self._request_timeout,\n self._max_retries\n )\n\n @property\n def summary_model_config(self):\n return LLMConfig(\n self._summary_model_api_key_list,\n self._summary_model_max_messages_tokens,\n self._request_interval_time,\n self._request_timeout,\n self._max_retries\n )"
},
{
"identifier": "count_text_tokens",
"path": "autogan/oai/count_tokens_utils.py",
"snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens"
},
{
"identifier": "generate_chat_completion",
"path": "autogan/oai/generate_utils.py",
"snippet": "def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Call the LLM interface\n\n Currently, only the chatgpt model of openai (including azure) is adapted.\n\n :param llm_config: LLM configuration.\n :param messages:\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n :param stream_mode:\n \"\"\"\n\n # When a certain configuration in the configuration list fails to request,\n # continue to try the next configuration until all configurations in the list are attempted.\n loop = llm_config.len_of_api_key_list\n for i in range(loop):\n time.sleep(llm_config.request_interval_time)\n api_key = llm_config.next_api_key\n try:\n completion_content = \"\"\n completion_tokens = 0\n index = 1\n for message in chat_completions(messages, api_key, llm_config.request_timeout,\n llm_config.max_retries, stream_mode):\n content = \"\"\n if stream_mode:\n if (message and \"choices\" in message and \"delta\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"delta\"]\n and message[\"choices\"][0][\"delta\"][\"content\"]):\n content = message[\"choices\"][0][\"delta\"][\"content\"]\n completion_content += content\n else:\n if (message and \"choices\" in message and \"message\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"message\"]\n and message[\"choices\"][0][\"message\"][\"content\"]):\n content = message[\"choices\"][0][\"message\"][\"content\"]\n completion_content = content\n if message and \"usage\" in message and \"completion_tokens\" in message[\"usage\"]:\n completion_tokens = message[\"usage\"][\"completion_tokens\"]\n response_func(agent_name, gen, api_key[\"model\"], stream_mode, index, content, completion_tokens, message)\n if content:\n index += 1\n\n if completion_content:\n if completion_tokens == 0:\n completion_tokens = count_text_tokens(completion_content, api_key['model'])\n return completion_content, completion_tokens\n else:\n raise ValueError(\"The return value is empty.\")\n except Exception as e:\n if i == loop - 1:\n print(f\"generate_chat_completion Exception: {e}\")\n return None, None"
},
{
"identifier": "environment_info",
"path": "autogan/utils/environment_utils.py",
"snippet": "def environment_info() -> str:\n \"\"\"Current environment information\n\n :return: --current_time: Y.m.d H:M:S week:%w\n \"\"\"\n info = f'current time: {get_time()}'\n\n return info"
},
{
"identifier": "default_response_func",
"path": "autogan/utils/response.py",
"snippet": "def default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,\n content: Optional[str], tokens: Optional[int], response: any):\n \"\"\"default response function\n 默认响应函数提供终端打印支持\n The default response function provides terminal printing support.\n\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n 用于区分 agent 回复、深思、压缩上下文、普通摘要、线索摘要\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n - system:\n - tool:\n - tool_call:\n :param model:\n :param stream_mode:\n :param index: response sequence\n :param content: completion content\n 生成内容\n :param tokens: completion tokens\n 生成内容的 tokens\n :param response: Respond to raw data\n 响应原始数据\n :return:\n \"\"\"\n if stream_mode:\n end = \"\"\n else:\n end = \"\\n\"\n\n if content:\n if gen == \"main\":\n if index == 1:\n print(f\"\\n{agent_name}: \", end=end)\n print(content, end=end)\n elif gen == \"idea\" or gen == \"tool_call\":\n if index == 1:\n print(\n colored(\n f\"\\n{agent_name}: \",\n \"cyan\",\n ),\n end=end,\n flush=True,\n )\n print(\n colored(\n content,\n \"cyan\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"system\":\n print(\n colored(\n f\"\\n{agent_name}: {content}\",\n \"red\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"tool\":\n print(\n colored(\n f\"\\n{agent_name}: {content}\",\n \"blue\",\n ),\n end=end,\n flush=True,\n )\n elif gen == \"search\":\n print(\n colored(\n f\"\\nurl: {content}\",\n \"cyan\",\n ),\n end=end,\n flush=True,\n )"
}
] | import re
from collections import defaultdict
from typing import Optional, Dict, Any
from autogan.agents.agent_switch import AgentSwitch
from autogan.utils.compressed_messages_utils import compressed_messages
from autogan.utils.compressed_text_utils import compressed_text_universal
from autogan.oai.config_utils import AgentConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.oai.generate_utils import generate_chat_completion
from autogan.utils.environment_utils import environment_info
from autogan.utils.response import default_response_func
from termcolor import colored | 9,965 | return x
class UniversalAgent:
def __init__(
self,
name: str,
agent_config: Optional[Dict] = None,
duty: Optional[str] = None,
work_flow: Optional[str] = None,
use_tool: Optional[str] = None, # only | join
super_rich: Optional[str] = None, # auto | on | off
stream_mode: Optional[bool] = None,
):
"""Agent base class
Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.
每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。
To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method.
想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。
:param name: The agent name should be unique in the organizational structure.
agent name 在组织架构中应当是唯一的。
:param agent_config: The agent configuration includes:
agent 配置包括:
- main_model: The LLM configuration of the agent's main body.
agent 主体的 LLM 配置。
- summary_model: The LLM configuration used for compressing context and generating text summaries.
用于压缩上下文以及生成文本摘要的 LLM 配置。
- request_interval_time: The interval time of LLM requests.
LLM 请求间隔时间。
- request_timeout:The timeout of LLM requests.
LLM 请求超时时间。
- max_retries: The maximum number of retries for LLM requests.
LLM 请求最大重试次数。
:param duty: Used to explain one's job responsibilities to other agents.
用于向其他 agent 说明自己的工作职责。
:param work_flow: Defines the workflow of the agent.
定义 agent 的工作流程。
:param use_tool: Defines the mode of the agent using the tool_function:
定义 agent 使用 tool_function 的模式:
- None: means not using the tool function.
不使用工具函数。
- only: Do not use the LLM, only use the tool function to generate results.
不使用 LLM,仅使用工具函数生成结果。
- join: The content generated by the LLM will be used as the input parameter for the tool_function.
LLM 生成的内容将作为 tool_function 的输入参数
:param super_rich: Whether to enable the deep thought function. When enabled,
it uses a set of analysis processes to refine the output of the agent. However,
this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model.
The name "super_rich" is a reminder that using this function with gpt-4 can be expensive,
even more so than Elon Musk's earning speed.
是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。
之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。
- auto: Disable for GPT-4, enable for other models
在 gpt-4下禁用,其他模型开启
- on: Always enabled
始终开启
- off: Always disabled
始终关闭
:param stream_mode: Whether to enable the stream_mode
定义 agent 的工作流程。
"""
self.name = name
self.agent_config = AgentConfig(agent_config) if agent_config else None
self.duty = duty
self.super_rich = super_rich # auto | on | off
self.stream_mode = stream_mode
self.response_func = default_response_func # Used to return results to the interface or terminal.
self.workmates = "" # relevant personnel's name and duty
self.pipeline = "" # In a linear workflow, this is the next person to communicate with.
# Translate the session ID of the pusher into the sub-session ID of the receiver.
self.sub_to_main_task_id = defaultdict(str)
# Translate the session id of the sender into the superior session id of the receiver.
self.main_to_sub_task_id = defaultdict(str)
self._work_flow = work_flow
self._use_tool = use_tool # only | join
self._conversation_messages = defaultdict(list) # key: task id,value: Conversation history
self._conversation_focus = defaultdict(Dict) # key: task id,value: {"task_issuer": "", "task_content": ""}
def set_agent_config(self, agent_config: Dict):
self.agent_config = AgentConfig(agent_config)
def new_task(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,
completion_tokens: int):
"""Accept tasks posted by other agent.
:param switch: AgentSwitch object
:param task_id: New task id
:param sender_name: Task Issuer's Name
:param content: Task content
:param completion_tokens: Task content tokens
"""
# Avoid excessively long task content
if (self._use_tool != "only" and completion_tokens >
self.agent_config.main_model_config.max_messages_tokens * 0.5):
self._push_to_switch(switch, task_id, "The task is too long", 5)
# Cache task information to maintain focus during task execution
task_content = content.replace(f"@{self.name}", "please help me")
task_content = task_content.replace(f"{switch.task_tag}", "")
self._conversation_focus[task_id] = {'task_issuer': sender_name, 'task_content': task_content}
# Start the generation process
self._generate_process(switch, task_id, sender_name, content, completion_tokens)
def receive(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,
completion_tokens: int):
"""Receive messages sent by other agents (excluding new task requests)
:param switch: AgentSwitch object
:param task_id: Task id
:param sender_name: Name of the agent sending the message
:param content: Message content
:param completion_tokens: Message content tokens
"""
if self._use_tool != "only":
safe_size = self.agent_config.main_model_config.max_messages_tokens
if completion_tokens > safe_size:
# 如消息内容过长,则对其进行压缩
|
try:
except ImportError:
def colored(x, *args, **kwargs):
return x
class UniversalAgent:
def __init__(
self,
name: str,
agent_config: Optional[Dict] = None,
duty: Optional[str] = None,
work_flow: Optional[str] = None,
use_tool: Optional[str] = None, # only | join
super_rich: Optional[str] = None, # auto | on | off
stream_mode: Optional[bool] = None,
):
"""Agent base class
Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.
每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。
To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method.
想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。
:param name: The agent name should be unique in the organizational structure.
agent name 在组织架构中应当是唯一的。
:param agent_config: The agent configuration includes:
agent 配置包括:
- main_model: The LLM configuration of the agent's main body.
agent 主体的 LLM 配置。
- summary_model: The LLM configuration used for compressing context and generating text summaries.
用于压缩上下文以及生成文本摘要的 LLM 配置。
- request_interval_time: The interval time of LLM requests.
LLM 请求间隔时间。
- request_timeout:The timeout of LLM requests.
LLM 请求超时时间。
- max_retries: The maximum number of retries for LLM requests.
LLM 请求最大重试次数。
:param duty: Used to explain one's job responsibilities to other agents.
用于向其他 agent 说明自己的工作职责。
:param work_flow: Defines the workflow of the agent.
定义 agent 的工作流程。
:param use_tool: Defines the mode of the agent using the tool_function:
定义 agent 使用 tool_function 的模式:
- None: means not using the tool function.
不使用工具函数。
- only: Do not use the LLM, only use the tool function to generate results.
不使用 LLM,仅使用工具函数生成结果。
- join: The content generated by the LLM will be used as the input parameter for the tool_function.
LLM 生成的内容将作为 tool_function 的输入参数
:param super_rich: Whether to enable the deep thought function. When enabled,
it uses a set of analysis processes to refine the output of the agent. However,
this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model.
The name "super_rich" is a reminder that using this function with gpt-4 can be expensive,
even more so than Elon Musk's earning speed.
是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。
之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。
- auto: Disable for GPT-4, enable for other models
在 gpt-4下禁用,其他模型开启
- on: Always enabled
始终开启
- off: Always disabled
始终关闭
:param stream_mode: Whether to enable the stream_mode
定义 agent 的工作流程。
"""
self.name = name
self.agent_config = AgentConfig(agent_config) if agent_config else None
self.duty = duty
self.super_rich = super_rich # auto | on | off
self.stream_mode = stream_mode
self.response_func = default_response_func # Used to return results to the interface or terminal.
self.workmates = "" # relevant personnel's name and duty
self.pipeline = "" # In a linear workflow, this is the next person to communicate with.
# Translate the session ID of the pusher into the sub-session ID of the receiver.
self.sub_to_main_task_id = defaultdict(str)
# Translate the session id of the sender into the superior session id of the receiver.
self.main_to_sub_task_id = defaultdict(str)
self._work_flow = work_flow
self._use_tool = use_tool # only | join
self._conversation_messages = defaultdict(list) # key: task id,value: Conversation history
self._conversation_focus = defaultdict(Dict) # key: task id,value: {"task_issuer": "", "task_content": ""}
def set_agent_config(self, agent_config: Dict):
self.agent_config = AgentConfig(agent_config)
def new_task(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,
completion_tokens: int):
"""Accept tasks posted by other agent.
:param switch: AgentSwitch object
:param task_id: New task id
:param sender_name: Task Issuer's Name
:param content: Task content
:param completion_tokens: Task content tokens
"""
# Avoid excessively long task content
if (self._use_tool != "only" and completion_tokens >
self.agent_config.main_model_config.max_messages_tokens * 0.5):
self._push_to_switch(switch, task_id, "The task is too long", 5)
# Cache task information to maintain focus during task execution
task_content = content.replace(f"@{self.name}", "please help me")
task_content = task_content.replace(f"{switch.task_tag}", "")
self._conversation_focus[task_id] = {'task_issuer': sender_name, 'task_content': task_content}
# Start the generation process
self._generate_process(switch, task_id, sender_name, content, completion_tokens)
def receive(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,
completion_tokens: int):
"""Receive messages sent by other agents (excluding new task requests)
:param switch: AgentSwitch object
:param task_id: Task id
:param sender_name: Name of the agent sending the message
:param content: Message content
:param completion_tokens: Message content tokens
"""
if self._use_tool != "only":
safe_size = self.agent_config.main_model_config.max_messages_tokens
if completion_tokens > safe_size:
# 如消息内容过长,则对其进行压缩 | compressed_text, total_tokens = compressed_text_universal( | 2 | 2023-12-06 03:24:34+00:00 | 12k |
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | inference.py | [
{
"identifier": "AverageMeter",
"path": "utils/metric_util.py",
"snippet": "class AverageMeter():\r\n \"\"\" Computes and stores the average and current value \"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n \"\"\" Reset all statistics \"\"\"\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n \"\"\" Update statistics \"\"\"\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r"
},
{
"identifier": "save_img_tensor",
"path": "utils/tensor_op.py",
"snippet": "def save_img_tensor(restored,result_dir,ippath):\r\n '''\r\n :param restored: (1,C,H,W)\r\n :param result_dir:\r\n :param ippath:\r\n :return:\r\n '''\r\n restored = torch.clamp(restored, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0).numpy()\r\n util.save_img(img_as_ubyte(restored),util.Generate_rp(result_dir,ippath))\r"
},
{
"identifier": "save_image_tensor",
"path": "utils/tensor_op.py",
"snippet": "def save_image_tensor(image_tensor, output_path=\"output/\"):\r\n image_np = torch_to_np(image_tensor)\r\n p = np_to_pil(image_np)\r\n p.save(output_path)\r"
},
{
"identifier": "mkdir",
"path": "utils/util.py",
"snippet": "def mkdir(path):\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r"
},
{
"identifier": "setup_logger",
"path": "utils/util.py",
"snippet": "def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):\r\n '''\r\n util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,\r\n screen=True, tofile=True)\r\n logger = logging.getLogger('base')\r\n logger.info(option.dict2str(opt))\r\n '''\r\n lg = logging.getLogger(logger_name)\r\n fmt = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s'\r\n color_fmt = colored('%(asctime)s.%(msecs)03d','green') + '- %(levelname)s: %(message)s'\r\n formatter = logging.Formatter(fmt=color_fmt,\r\n datefmt='%y-%m-%d %H:%M:%S')\r\n lg.setLevel(level)\r\n lg.propagate = False\r\n if tofile:\r\n log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))\r\n fh = logging.FileHandler(log_file, mode='w')\r\n fh.setFormatter(formatter)\r\n lg.addHandler(fh)\r\n if screen:\r\n sh = logging.StreamHandler()\r\n sh.setFormatter(formatter)\r\n lg.addHandler(sh)\r"
},
{
"identifier": "crop_HWC_img",
"path": "utils/data_util.py",
"snippet": "def crop_HWC_img(image, base=64):\r\n \"\"\"\r\n 裁切到multiple of base的size上\r\n :param image: H,W,C\r\n :param base: (int)\r\n :return:\r\n \"\"\"\r\n h = image.shape[0]\r\n w = image.shape[1]\r\n crop_h = h % base\r\n crop_w = w % base\r\n return image[crop_h // 2:h - crop_h + crop_h // 2, crop_w // 2:w - crop_w + crop_w // 2, :]\r"
},
{
"identifier": "random_augmentation",
"path": "utils/data_util.py",
"snippet": "def random_augmentation(*args):\r\n out = []\r\n flag_aug = random.randint(0,7)\r\n for data in args:\r\n out.append(data_augmentation(data, flag_aug).copy())\r\n return out\r"
},
{
"identifier": "tensor2img",
"path": "utils/data_util.py",
"snippet": "def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\r\n \"\"\"Convert torch Tensors into image numpy arrays.\r\n\r\n After clamping to [min, max], values will be normalized to [0, 1].\r\n\r\n Args:\r\n tensor (Tensor or list[Tensor]): Accept shapes:\r\n 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);\r\n 2) 3D Tensor of shape (3/1 x H x W);\r\n 3) 2D Tensor of shape (H x W).\r\n Tensor channel should be in RGB order.\r\n rgb2bgr (bool): Whether to change rgb to bgr.\r\n out_type (numpy type): output types. If ``np.uint8``, transform outputs\r\n to uint8 type with range [0, 255]; otherwise, float type with\r\n range [0, 1]. Default: ``np.uint8``.\r\n min_max (tuple[int]): min and max values for clamp.\r\n\r\n Returns:\r\n (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of\r\n shape (H x W). The channel order is BGR.\r\n \"\"\"\r\n if not (torch.is_tensor(tensor) or\r\n (isinstance(tensor, list)\r\n and all(torch.is_tensor(t) for t in tensor))):\r\n raise TypeError(\r\n f'tensor or list of tensors expected, got {type(tensor)}')\r\n\r\n if torch.is_tensor(tensor):\r\n tensor = [tensor]\r\n result = []\r\n for _tensor in tensor:\r\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\r\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\r\n\r\n n_dim = _tensor.dim()\r\n if n_dim == 4:\r\n img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()\r\n img_np = img_np.transpose(1, 2, 0)\r\n if rgb2bgr:\r\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\r\n elif n_dim == 3:\r\n img_np = _tensor.numpy()\r\n img_np = img_np.transpose(1, 2, 0)\r\n if img_np.shape[2] == 1: # gray image\r\n img_np = np.squeeze(img_np, axis=2)\r\n else:\r\n if rgb2bgr:\r\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\r\n elif n_dim == 2:\r\n img_np = _tensor.numpy()\r\n else:\r\n raise TypeError('Only support 4D, 3D or 2D tensor. '\r\n f'But received with dimension: {n_dim}')\r\n if out_type == np.uint8:\r\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\r\n img_np = (img_np * 255.0).round()\r\n img_np = img_np.astype(out_type)\r\n result.append(img_np)\r\n if len(result) == 1:\r\n result = result[0]\r\n return result\r"
},
{
"identifier": "compute_psnr_ssim",
"path": "metrics/psnr_ssim.py",
"snippet": "def compute_psnr_ssim(recoverd, clean):\r\n \"\"\"\r\n model.output输入\r\n \"\"\"\r\n assert recoverd.shape == clean.shape\r\n recoverd = np.clip(recoverd.detach().cpu().numpy(), 0, 1)\r\n clean = np.clip(clean.detach().cpu().numpy(), 0, 1)\r\n\r\n recoverd = recoverd.transpose(0, 2, 3, 1)\r\n clean = clean.transpose(0, 2, 3, 1)\r\n psnr = 0\r\n ssim = 0\r\n\r\n for i in range(recoverd.shape[0]):\r\n # psnr_val += compare_psnr(clean[i], recoverd[i])\r\n # ssim += compare_ssim(clean[i], recoverd[i], multichannel=True)\r\n psnr += peak_signal_noise_ratio(clean[i], recoverd[i], data_range=1)\r\n ssim += structural_similarity(clean[i], recoverd[i], data_range=1, multichannel=True)\r\n\r\n return psnr / recoverd.shape[0], ssim / recoverd.shape[0], recoverd.shape[0]\r"
},
{
"identifier": "calculate_psnr",
"path": "metrics/psnr_ssim.py",
"snippet": "def calculate_psnr(img1, img2, crop_border=0, test_y_channel=False):\r\n \"\"\"img1 and img2 have range [0, 255] np.uint8\r\n tensor2img后输入\r\n crop_border (int): Cropped pixels in each edge of an image. These\r\n pixels are not involved in the PSNR calculation.\r\n test_y_channel (bool): Test on Y channel of YCbCr. Default: False.\r\n\r\n Returns:\r\n float: psnr result.\r\n \"\"\"\r\n img1 = img1.astype(np.float64)\r\n img2 = img2.astype(np.float64)\r\n if crop_border != 0:\r\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\r\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\r\n if test_y_channel:\r\n img1 = to_y_channel(img1)\r\n img2 = to_y_channel(img2)\r\n\r\n mse = np.mean((img1 - img2)**2)\r\n if mse == 0:\r\n return float('inf')\r\n return 20 * math.log10(255.0 / math.sqrt(mse))\r"
},
{
"identifier": "calculate_ssim",
"path": "metrics/psnr_ssim.py",
"snippet": "def calculate_ssim(img1, img2):\r\n '''calculate SSIM\r\n the same outputs as MATLAB's\r\n img1, img2: [0, 255]\r\n '''\r\n if not img1.shape == img2.shape:\r\n raise ValueError('Input images must have the same dimensions.')\r\n if img1.ndim == 2:\r\n return ssim(img1, img2)\r\n elif img1.ndim == 3:\r\n if img1.shape[2] == 3:\r\n ssims = []\r\n for i in range(3):\r\n ssims.append(ssim(img1, img2))\r\n return np.array(ssims).mean()\r\n elif img1.shape[2] == 1:\r\n return ssim(np.squeeze(img1), np.squeeze(img2))\r\n else:\r\n raise ValueError('Wrong input image dimensions.')\r"
},
{
"identifier": "IDR_restormer",
"path": "models/archs/IDR_restormer_arch.py",
"snippet": "class IDR_restormer(nn.Module):\n def __init__(self,\n inp_channels=3,\n out_channels=3,\n dim=48,\n num_blocks=[4, 6, 6, 8],\n num_refinement_blocks=4,\n heads=[1, 2, 4, 8],\n ffn_expansion_factor=2.66,\n bias=False,\n LayerNorm_type='WithBias', ## Other option 'BiasFree'\n num_degra_queries = 24,\n keep_degra = 48,\n degra_type = 5,\n sam = True,\n ops_type = 5,\n pred = True\n ):\n super(IDR_restormer, self).__init__()\n\n self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}\n\n self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)\n\n self.encoder_level1 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,\n LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])\n\n self.down1_2 = Downsample(dim) ## From Level 1 to Level 2\n self.encoder_level2 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])\n\n self.down2_3 = Downsample(int(dim * 2 ** 1)) ## From Level 2 to Level 3\n self.encoder_level3 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])\n\n self.down3_4 = Downsample(int(dim * 2 ** 2)) ## From Level 3 to Level 4\n self.latent = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 3), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])])\n\n self.up4_3 = Upsample(int(dim * 2 ** 3)) ## From Level 4 to Level 3\n self.reduce_chan_level3 = nn.Conv2d(int(dim * 2 ** 3), int(dim * 2 ** 2), kernel_size=1, bias=bias)\n self.decoder_level3 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])\n\n self.up3_2 = Upsample(int(dim * 2 ** 2)) ## From Level 3 to Level 2\n self.reduce_chan_level2 = nn.Conv2d(int(dim * 2 ** 2), int(dim * 2 ** 1), kernel_size=1, bias=bias)\n self.decoder_level2 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])\n\n self.up2_1 = Upsample(int(dim * 2 ** 1)) ## From Level 2 to Level 1 (NO 1x1 conv to reduce channels)\n\n self.decoder_level1 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])\n\n self.refinement = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_refinement_blocks)])\n\n self.output = nn.Conv2d(int(dim * 2 ** 1), out_channels, kernel_size=3, stride=1, padding=1, bias=bias)\n\n self.degra_key = nn.Parameter(torch.randn(degra_type, num_degra_queries, int(dim * 2 ** 3)), requires_grad=True)\n self.dmixer = PI_MLP_Mixer(dim=int(dim * 2 ** 3),num_degra=num_degra_queries*degra_type,keep_degra=keep_degra,init='pca')\n self.kdp_level1 = Key_TransformerBlock(dim=dim, dimkey=int(dim * 2 ** 3), 
num_heads=heads[0], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.kdp_level2 = Key_TransformerBlock(dim=int(dim * 2 ** 1), dimkey=int(dim * 2 ** 3), num_heads=heads[1], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.kdp_level3 = Key_TransformerBlock(dim=int(dim * 2 ** 2), dimkey=int(dim * 2 ** 3), num_heads=heads[2], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.cri_pix = nn.L1Loss().cuda()\n\n\n\n def forward(self, inp_img, degra_type=None, gt=None, epoch=None):\n \"\"\"\n only input_image is required during inference\n \"\"\"\n flag=0\n batch_size,c,h,w = inp_img.shape\n if epoch and epoch <= 550:\n # stage 1 training - Task-oriented knowledge collection\n de_type = degra_type[0]\n degra_id = self.de_dict[de_type]\n degra_key = self.degra_key[degra_id,:,:].unsqueeze(0).expand(batch_size,-1,-1)\n else:\n # stage 2 training - Ingredients-oriented knowedge intergation\n if flag==0:\n U,S,V = process_USV(self.degra_key.detach())\n flag=1\n U,V = self.dmixer(U,V,batch_size)\n degra_key = [U,S,V]\n de_type = None\n\n\n inp_enc_level1 = self.patch_embed(inp_img)\n out_enc_level1 = self.encoder_level1(inp_enc_level1)\n torch_resize1 = Resize([out_enc_level1.shape[2],out_enc_level1.shape[3]])\n inp_img1 = torch_resize1(inp_img)\n out_enc_level1,output_img1,pred1 = self.kdp_level1(out_enc_level1,degra_key,inp_img1,degra_type=de_type)\n\n inp_enc_level2 = self.down1_2(out_enc_level1)\n out_enc_level2 = self.encoder_level2(inp_enc_level2)\n torch_resize2 = Resize([out_enc_level2.shape[2],out_enc_level2.shape[3]])\n inp_img2 = torch_resize2(inp_img)\n out_enc_level2,output_img2,pred2 = self.kdp_level2(out_enc_level2,degra_key,inp_img2,degra_type=de_type)\n\n inp_enc_level3 = self.down2_3(out_enc_level2)\n out_enc_level3 = self.encoder_level3(inp_enc_level3)\n torch_resize3 = Resize([out_enc_level3.shape[2],out_enc_level3.shape[3]])\n inp_img3 = torch_resize3(inp_img)\n out_enc_level3,output_img3,pred3 = self.kdp_level3(out_enc_level3,degra_key,inp_img3,degra_type=de_type)\n\n inp_enc_level4 = self.down3_4(out_enc_level3)\n latent = self.latent(inp_enc_level4)\n\n inp_dec_level3 = self.up4_3(latent)\n inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)\n inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)\n out_dec_level3 = self.decoder_level3(inp_dec_level3)\n\n inp_dec_level2 = self.up3_2(out_dec_level3)\n inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)\n inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)\n out_dec_level2 = self.decoder_level2(inp_dec_level2)\n\n inp_dec_level1 = self.up2_1(out_dec_level2)\n inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)\n out_dec_level1 = self.decoder_level1(inp_dec_level1)\n\n out_dec_level1 = self.refinement(out_dec_level1)\n out_dec_level1 = self.output(out_dec_level1) + inp_img\n \n if gt is not None:\n gt_img1 = torch_resize1(gt)\n gt_img2 = torch_resize2(gt)\n gt_img3 = torch_resize3(gt)\n output_img = [output_img1,output_img2,output_img3] \n gt_img = [gt_img1,gt_img2,gt_img3] \n loss = np.sum([self.cri_pix(output_img[j],gt_img[j]) for j in range(len(output_img))])\n return [out_dec_level1,loss,pred1,pred2,pred3]\n else:\n return out_dec_level1"
}
] | import argparse
import subprocess
import numpy as np
import os
import torch
import torch.nn as nn
import logging
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.metric_util import AverageMeter
from utils.tensor_op import save_img_tensor, save_image_tensor
from utils.util import mkdir, setup_logger
from utils.data_util import crop_HWC_img, random_augmentation, tensor2img
from metrics.psnr_ssim import compute_psnr_ssim, calculate_psnr, calculate_ssim
from models.archs.IDR_restormer_arch import IDR_restormer | 7,611 | y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def __getitem__(self, idx):
degraded_path = self.ids[idx]
clean_path = self._get_gt_path(degraded_path)
degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32)
clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32)
clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img)
degraded_name = degraded_path.split('/')[-1][:-4]
return [degraded_name], degraded_img, clean_img
def __len__(self):
return self.length
def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + 'denoise/' + str(sigma) + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
dataset.set_sigma(sigma)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + clean_name[0] + '.png')
logger.info("Deonise sigma=%d: psnr: %.2f, ssim: %.4f" % (sigma, psnr.avg, ssim.avg))
def test_Derain_Dehaze(net, dataset, task="derain",save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + task + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([degraded_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
N = degrad_patch.shape[0]
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + degraded_name[0] + '.png')
logger.info("PSNR: %.2f, SSIM: %.4f" % (psnr.avg, ssim.avg))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Parameters
parser.add_argument('--cuda', type=int, default=0)
parser.add_argument('--mode', type=int, default=0,
help='0 for 5 tasks, 1 for denoising details, 2 for unknowing UDC')
parser.add_argument('--denoise_CBSD68_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_urban100_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_Kodak24_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--derain_path', type=str, default="", help='save path of test raining images')
parser.add_argument('--dehaze_path', type=str, default="", help='save path of test hazy images')
parser.add_argument('--deblur_path', type=str, default="", help='save path of test blur images')
parser.add_argument('--low_light_path', type=str, default="", help='save path of test low-light images')
parser.add_argument('--udc_T_path', type=str, default="", help='save path of test udc Toled images')
parser.add_argument('--udc_P_path', type=str, default="", help='save path of test udc Poled images')
parser.add_argument('--output_path', type=str, default="./results/visualization", help='output save path')
parser.add_argument('--ckpt_path', type=str, default="", help='checkpoint save path')
parser.add_argument('--log_path', type=str, default="./results/log", help='log save path')
opt = parser.parse_args()
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.set_device(opt.cuda)
denoise_set = DenoiseTestDataset(opt)
derain_set = DerainDehazeDataset(opt)
# Make network
net = IDR_restormer(inp_channels=3, out_channels=3, dim=24, num_blocks=[2,3,3,4], num_refinement_blocks=2, heads=[1,2,4,8], ffn_expansion_factor=2.66, bias=False, LayerNorm_type='WithBias', num_degra_queries = 24, keep_degra=48)
net = net.cuda()
net.eval()
net.load_state_dict(torch.load(opt.ckpt_path, map_location=torch.device(opt.cuda)))
|
class DenoiseTestDataset(Dataset):
def __init__(self, args, dataset="CBSD68"):
super(DenoiseTestDataset, self).__init__()
self.args = args
self.clean_ids = []
self.sigma = 15
self.dataset_dict = {'CBSD68': 0, 'urban100': 1, 'Kodak24':2}
self.set_dataset(dataset)
self.toTensor = ToTensor()
def _init_clean_ids(self):
if self.task_idx == 0:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_CBSD68_path)
self.clean_ids += [self.args.denoise_CBSD68_path + id_ for id_ in name_list]
elif self.task_idx == 1:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_urban100_path)
self.clean_ids += [self.args.denoise_urban100_path + id_ for id_ in name_list]
elif self.task_idx == 2:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_Kodak24_path)
self.clean_ids += [self.args.denoise_Kodak24_path + id_ for id_ in name_list]
self.num_clean = len(self.clean_ids)
def set_dataset(self, dataset):
self.task_idx = self.dataset_dict[dataset]
self._init_clean_ids()
def _add_gaussian_noise(self, clean_patch):
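# Synthesizes the noisy input on the fly: zero-mean unit-variance Gaussian noise is scaled by self.sigma in the [0, 255] intensity domain, added to the clean patch and clipped back to uint8; the clean patch is returned unchanged as the target.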
noise = np.random.randn(*clean_patch.shape)
noisy_patch = np.clip(clean_patch + noise * self.sigma, 0, 255).astype(np.uint8)
return noisy_patch, clean_patch
def _edgeComputation(self,x):
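# Builds a single-channel edge-magnitude map: each pixel accumulates the absolute horizontal and vertical differences with its neighbours, the result is averaged over the 3 color channels and scaled by 1/4. The helper is defined in both test datasets but does not appear to be called in the portion of the script shown here.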
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def set_sigma(self, sigma):
self.sigma = sigma
def __getitem__(self, clean_id):
clean_img = crop_HWC_img(np.array(Image.open(self.clean_ids[clean_id]).convert('RGB')), base=32)
clean_name = self.clean_ids[clean_id].split("/")[-1].split('.')[0]
noisy_img, _ = self._add_gaussian_noise(clean_img)
clean_img, noisy_img = self.toTensor(clean_img), self.toTensor(noisy_img)
return [clean_name], noisy_img, clean_img
def __len__(self):
return self.num_clean
class DerainDehazeDataset(Dataset):
def __init__(self, args, task="derain"):
super(DerainDehazeDataset, self).__init__()
self.ids = []
self.task_idx = 0
self.args = args
self.task_dict = {'derain': 0, 'dehaze': 1, 'deblur':2, 'low-light':3, 'UDC_T':4, 'UDC_P':5}
self.toTensor = ToTensor()
self.set_dataset(task)
def _init_input_ids(self):
if self.task_idx == 0:
self.ids = []
name_list = os.listdir(self.args.derain_path + 'input/')
self.ids += [self.args.derain_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 1:
self.ids = []
name_list = os.listdir(self.args.dehaze_path + 'input/')
self.ids += [self.args.dehaze_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 2:
self.ids = []
name_list = os.listdir(self.args.deblur_path + 'input/')
self.ids += [self.args.deblur_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 3:
self.ids = []
name_list = os.listdir(self.args.low_light_path + 'input/')
self.ids += [self.args.low_light_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 4:
self.ids = []
name_list = os.listdir(self.args.udc_T_path + 'input/')
self.ids += [self.args.udc_T_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 5:
self.ids = []
name_list = os.listdir(self.args.udc_P_path + 'input/')
self.ids += [self.args.udc_P_path + 'input/' + id_ for id_ in name_list]
self.length = len(self.ids)
def _get_gt_path(self, degraded_name):
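# Maps a degraded image path to its ground-truth path: for dehaze (task_idx == 1) the GT file keeps only the name prefix before the first '_' and lives in the sibling target/ folder; every other task simply swaps 'input' for 'target' in the path.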
if self.task_idx == 0:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 1:
dir_name = degraded_name.split("input")[0] + 'target/'
name = degraded_name.split('/')[-1].split('_')[0] + '.png'
gt_name = dir_name + name
elif self.task_idx == 2:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 3:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 4:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 5:
gt_name = degraded_name.replace("input", "target")
return gt_name
def set_dataset(self, task):
self.task_idx = self.task_dict[task]
self._init_input_ids()
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def __getitem__(self, idx):
degraded_path = self.ids[idx]
clean_path = self._get_gt_path(degraded_path)
degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32)
clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32)
clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img)
degraded_name = degraded_path.split('/')[-1][:-4]
return [degraded_name], degraded_img, clean_img
def __len__(self):
return self.length
def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + 'denoise/' + str(sigma) + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
dataset.set_sigma(sigma)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + clean_name[0] + '.png')
logger.info("Deonise sigma=%d: psnr: %.2f, ssim: %.4f" % (sigma, psnr.avg, ssim.avg))
def test_Derain_Dehaze(net, dataset, task="derain",save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + task + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([degraded_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
N = degrad_patch.shape[0]
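# Note: the sample count returned by compute_psnr_ssim is overridden with the batch size (1 with this loader), and that value is what the running PSNR/SSIM averages are weighted by.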
psnr.update(temp_psnr, N)
ssim.update(temp_ssim, N)
if save_img:
save_image_tensor(restored, output_path + degraded_name[0] + '.png')
logger.info("PSNR: %.2f, SSIM: %.4f" % (psnr.avg, ssim.avg))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Parameters
parser.add_argument('--cuda', type=int, default=0)
parser.add_argument('--mode', type=int, default=0,
help='0 for 5 tasks, 1 for denoising details, 2 for unknown UDC')
parser.add_argument('--denoise_CBSD68_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_urban100_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--denoise_Kodak24_path', type=str, default="", help='save path of test noisy images')
parser.add_argument('--derain_path', type=str, default="", help='save path of test rainy images')
parser.add_argument('--dehaze_path', type=str, default="", help='save path of test hazy images')
parser.add_argument('--deblur_path', type=str, default="", help='save path of test blur images')
parser.add_argument('--low_light_path', type=str, default="", help='save path of test low-light images')
parser.add_argument('--udc_T_path', type=str, default="", help='save path of test udc Toled images')
parser.add_argument('--udc_P_path', type=str, default="", help='save path of test udc Poled images')
parser.add_argument('--output_path', type=str, default="./results/visualization", help='output save path')
parser.add_argument('--ckpt_path', type=str, default="", help='checkpoint save path')
parser.add_argument('--log_path', type=str, default="./results/log", help='log save path')
opt = parser.parse_args()
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.set_device(opt.cuda)
denoise_set = DenoiseTestDataset(opt)
derain_set = DerainDehazeDataset(opt)
# Make network
net = IDR_restormer(inp_channels=3, out_channels=3, dim=24, num_blocks=[2,3,3,4], num_refinement_blocks=2, heads=[1,2,4,8], ffn_expansion_factor=2.66, bias=False, LayerNorm_type='WithBias', num_degra_queries = 24, keep_degra=48)
net = net.cuda()
net.eval()
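# map_location=torch.device(opt.cuda) interprets the integer GPU index as a CUDA device, so the checkpoint tensors are loaded directly onto the selected GPU.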
net.load_state_dict(torch.load(opt.ckpt_path, map_location=torch.device(opt.cuda)))
| setup_logger('base', opt.log_path, level=logging.INFO, phase='test', screen=True, tofile=False) | 4 | 2023-12-07 10:58:34+00:00 | 12k |
TACJu/Compositor | Compositor_Mask2Former/mask2former_video/data_video/ytvis_eval.py | [
{
"identifier": "YTVOS",
"path": "Compositor_Mask2Former/mask2former_video/data_video/datasets/ytvis_api/ytvos.py",
"snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset,self.anns,self.cats,self.vids = dict(),dict(),dict(),dict()\n self.vidToAnns, self.catToVids = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...')\n anns, cats, vids = {}, {}, {}\n vidToAnns,catToVids = defaultdict(list),defaultdict(list)\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n vidToAnns[ann['video_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'videos' in self.dataset:\n for vid in self.dataset['videos']:\n vids[vid['id']] = vid\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToVids[ann['category_id']].append(ann['video_id'])\n\n print('index created!')\n\n # create class members\n self.anns = anns\n self.vidToAnns = vidToAnns\n self.catToVids = catToVids\n self.vids = vids\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))\n\n def getAnnIds(self, vidIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param vidIds (int array) : get anns for given vids\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(vidIds) == 0:\n lists = [self.vidToAnns[vidId] for vidId in vidIds if vidId in self.vidToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['avg_area'] > areaRng[0] and ann['avg_area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n supNms = supNms if _isArrayLike(supNms) else [supNms]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getVidIds(self, vidIds=[], catIds=[]):\n '''\n Get vid ids that satisfy given filter conditions.\n :param vidIds (int array) : get vids for given ids\n :param catIds (int array) : get vids with all given cats\n :return: ids (int array) : integer array of vid ids\n '''\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == 0:\n ids = self.vids.keys()\n else:\n ids = set(vidIds)\n for i, catId in enumerate(catIds):\n if i == 0 and len(ids) == 0:\n ids = set(self.catToVids[catId])\n else:\n ids &= set(self.catToVids[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadVids(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying vid\n :return: vids (object array) : loaded vid objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.vids[id] for id in ids]\n elif type(ids) == int:\n return [self.vids[ids]]\n\n\n def loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = YTVOS()\n res.dataset['videos'] = [img for img in self.dataset['videos']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsVidIds = [ann['video_id'] for ann in anns]\n assert set(annsVidIds) == (set(annsVidIds) & set(self.getVidIds())), \\\n 'Results do not correspond to current coco set'\n if 'segmentations' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['areas'] = []\n if not 'bboxes' in ann:\n ann['bboxes'] = []\n for seg in ann['segmentations']:\n # now only support compressed RLE format as 
segmentation results\n if seg:\n ann['areas'].append(maskUtils.area(seg))\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(maskUtils.toBbox(seg))\n else:\n ann['areas'].append(None)\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(None)\n ann['id'] = id+1\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n ann['iscrowd'] = 0\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n def annToRLE(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n t = self.vids[ann['video_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentations'][frameId]\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = segm\n return rle\n\n def annToMask(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann, frameId)\n m = maskUtils.decode(rle)\n return m"
},
{
"identifier": "YTVOSeval",
"path": "Compositor_Mask2Former/mask2former_video/data_video/datasets/ytvis_api/ytvoseval.py",
"snippet": "class YTVOSeval:\n # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.\n #\n # The usage for YTVOSeval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalVids = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.vidIds = sorted(cocoGt.getVidIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n for i, a in enumerate(ann['segmentations']):\n if a:\n rle = coco.annToRLE(ann, i)\n ann['segmentations'][i] = rle\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['video_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['video_id'], dt['category_id']].append(dt)\n self.evalVids = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalVids\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.vidIds = list(np.unique(p.vidIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(vidId, catId): computeIoU(vidId, catId) \\\n for vidId in p.vidIds\n for catId in catIds}\n\n evaluateVid = self.evaluateVid\n maxDet = p.maxDets[-1]\n \n \n self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for vidId in p.vidIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, vidId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentations'] for g in gt]\n d = [d['segmentations'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bboxes'] for g in gt]\n d = [d['bboxes'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n #ious = maskUtils.iou(d,g,iscrowd)\n def iou_seq(d_seq, g_seq):\n i = .0\n u = .0\n for d, g in zip(d_seq, g_seq):\n if d and g:\n i += maskUtils.area(maskUtils.merge([d, g], True))\n u += maskUtils.area(maskUtils.merge([d, g], False))\n elif not d and g:\n u += maskUtils.area(g)\n elif d and not g:\n u += maskUtils.area(d)\n if not u > .0:\n print(\"Mask sizes in video {} and category {} may not match!\".format(vidId, catId))\n iou = i / u if u > .0 else .0\n return iou\n ious = np.zeros([len(d), len(g)])\n for i, j in np.ndindex(ious.shape):\n ious[i, j] = iou_seq(d[i], g[j])\n #print(vidId, catId, ious.shape, ious)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n 
else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['avg_area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateVid(self, vidId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'video_id': vidId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = 
-np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.vidIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.vidIds) if i in setI]\n I0 = len(_pe.vidIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == 
p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()"
}
] | import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table | 10,684 | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
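# The YTVOS constructor prints progress information while loading annotations; stdout is redirected below so that building the evaluator stays quiet.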
with contextlib.redirect_stdout(io.StringIO()): | self._ytvis_api = YTVOS(json_file) | 0 | 2023-12-12 11:49:28+00:00 | 12k |
neu-spiral/multi-label-emg | multi_label_emg/train.py | [
{
"identifier": "load_data_dict",
"path": "multi_label_emg/data.py",
"snippet": "def load_data_dict():\n \"\"\"\n Loads features and labels from subject folders into a single dictionary as described below.\n NOTE - preprocessing should be been done first to extract features from raw data (see README).\n\n data_dict = {\n Subj0: {\n Calibration_features: ...,\n Calibration_dir_labels: ...,\n Calibration_mod_labels: ...,\n Calibration_visual_dir_labels: ...,\n Calibration_visual_mod_labels: ...,\n SimultaneousPulse1_NoFeedback_features: ...,\n ...\n },\n ...\n }\n \"\"\"\n\n blocks = [\"Calibration\"]\n for i in [1, 2, 3]:\n for feedback in [\"NoFeedBack\", \"WithFeedBack\"]:\n blocks.append(f\"SimultaneousPulse{i}_{feedback}\")\n blocks.append(f\"HoldPulse{i}_{feedback}\")\n\n results = {}\n for i in trange(11, desc=\"Load Subjects\", leave=True):\n results[f\"Subj{i}\"] = {}\n for block in tqdm(blocks, leave=False, position=1):\n path = DATASET_DIR / \"python\" / f\"Subj{i}\" / block\n # NOTE - features.npy is created during preprocessing script\n results[f\"Subj{i}\"][f\"{block}_features\"] = np.load(path / \"features.npy\")\n results[f\"Subj{i}\"][f\"{block}_dir_labels\"] = np.load(path / \"joystick_direction_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_mod_labels\"] = np.load(path / \"joystick_modifier_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_visual_dir_labels\"] = np.load(path / \"visual_direction_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_visual_mod_labels\"] = np.load(path / \"visual_modifier_labels.npy\")\n return results"
},
{
"identifier": "AvgPairs",
"path": "multi_label_emg/models.py",
"snippet": "class AvgPairs:\n \"\"\"Create fake doubles by averaging pairs of singles. New items have hard labels including both classes\"\"\"\n\n def __init__(self, n_per_class: int):\n self.n_per_class = n_per_class\n\n def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):\n \"\"\"\n Args:\n x_single: (n_samples_in, n_features) - data/features from single gestures\n y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures\n y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures\n\n Returns:\n x_prime: (n_samples_aug, n_features) - augmented data\n y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels\n y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels\n \"\"\"\n x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)\n x_aug, y_dir_aug, y_mod_aug = [], [], []\n for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):\n x_aug.append((x1 + x2) / 2)\n y_dir_aug.append(y1)\n y_mod_aug.append(y2)\n x_aug = np.stack(x_aug)\n y_dir_aug = np.stack(y_dir_aug)\n y_mod_aug = np.stack(y_mod_aug)\n\n if self.n_per_class > 0:\n # For each combination class, truncate to self.n_per_class\n res_x, res_y_dir, res_y_mod = [], [], []\n for d in np.unique(y_dir_aug, axis=0):\n for m in np.unique(y_mod_aug, axis=0):\n idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]\n perm = np.random.permutation(len(idx))\n res_x.append(x_aug[idx[perm[: self.n_per_class]]])\n res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])\n res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])\n\n x_aug = np.concatenate(res_x)\n y_dir_aug = np.concatenate(res_y_dir)\n y_mod_aug = np.concatenate(res_y_mod)\n\n return x_aug, y_dir_aug, y_mod_aug\n\n def __repr__(self):\n return f\"{type(self).__name__}(n_per_class={self.n_per_class})\""
},
{
"identifier": "ElementwiseMaxPairs",
"path": "multi_label_emg/models.py",
"snippet": "class ElementwiseMaxPairs:\n \"\"\"Create fake doubles by taking elementwise max of each feature.\n New items have hard labels including both classes\"\"\"\n\n def __init__(self, n_per_class: int):\n self.n_per_class = n_per_class\n\n def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):\n \"\"\"\n Args:\n x_single: (n_samples_in, n_features) - data/features from single gestures\n y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures\n y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures\n\n Returns:\n x_prime: (n_samples_aug, n_features) - augmented data\n y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels\n y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels\n \"\"\"\n x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)\n x_aug, y_dir_aug, y_mod_aug = [], [], []\n for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):\n x_aug.append(np.maximum(x1, x2))\n y_dir_aug.append(y1)\n y_mod_aug.append(y2)\n x_aug = np.stack(x_aug)\n y_dir_aug = np.stack(y_dir_aug)\n y_mod_aug = np.stack(y_mod_aug)\n\n if self.n_per_class > 0:\n # For each combination class, truncate to self.n_per_class\n res_x, res_y_dir, res_y_mod = [], [], []\n for d in np.unique(y_dir_aug, axis=0):\n for m in np.unique(y_mod_aug, axis=0):\n idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]\n perm = np.random.permutation(len(idx))\n res_x.append(x_aug[idx[perm[: self.n_per_class]]])\n res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])\n res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])\n\n x_aug = np.concatenate(res_x)\n y_dir_aug = np.concatenate(res_y_dir)\n y_mod_aug = np.concatenate(res_y_mod)\n\n return x_aug, y_dir_aug, y_mod_aug\n\n def __repr__(self):\n return f\"{type(self).__name__}(n_per_class={self.n_per_class})\""
},
{
"identifier": "ParallelA",
"path": "multi_label_emg/models.py",
"snippet": "class ParallelA(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelA.pkl\"\n\n def __init__(\n self,\n dir_clf,\n mod_clf,\n use_augmentation: bool,\n n_aug_per_class: int = -1,\n include_rest_data_for_clf: bool = False,\n ):\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n self.use_augmentation = use_augmentation\n self.n_aug_per_class = n_aug_per_class\n self._n_aug_created = None\n self.include_rest_data_for_clf = include_rest_data_for_clf\n\n def get_params(self, deep=True):\n return {\n \"dir_clf\": self.dir_clf,\n \"mod_clf\": self.mod_clf,\n \"use_augmentation\": self.use_augmentation,\n \"n_aug_per_class\": self.n_aug_per_class,\n \"include_rest_data_for_clf\": self.include_rest_data_for_clf,\n }\n\n def fit(self, features, y_dir, y_mod):\n if self.use_augmentation:\n aug = AvgPairs(self.n_aug_per_class)\n aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)\n features = np.concatenate([features, aug_features])\n y_dir = np.concatenate([y_dir, aug_dir_labels])\n y_mod = np.concatenate([y_mod, aug_mod_labels])\n self._n_aug_created = len(aug_features)\n\n if y_dir.ndim == 2:\n y_dir = y_dir.argmax(-1)\n if y_mod.ndim == 2:\n y_mod = y_mod.argmax(-1)\n\n if self.include_rest_data_for_clf:\n # In this case, the label (NoDir, NoMod) could mean \"active and doesn't fit our classes\" or \"resting\"\n self.dir_clf.fit(features, y_dir)\n self.mod_clf.fit(features, y_mod)\n else:\n # In this case, the label (NoDir, NoMod) means \"active and doesn't fit classes\".\n # \"Rest\" data is out-of-domain\n active_idx = np.logical_or(y_dir != NO_DIR_IDX, y_mod != NO_MOD_IDX)\n active_features = features[active_idx]\n active_y_dir = y_dir[active_idx]\n active_y_mod = y_mod[active_idx]\n\n self.dir_clf.fit(active_features, active_y_dir)\n self.mod_clf.fit(active_features, active_y_mod)\n return self\n\n def predict_proba(self, features):\n \"\"\"Only for gestures\"\"\"\n dir_probs = self.dir_clf.predict_proba(features)\n mod_probs = self.mod_clf.predict_proba(features)\n return dir_probs, mod_probs\n\n def predict(self, features):\n \"\"\"features.shape == (n_channels, n_samples) or (n_trials, n_channels, n_samples)\"\"\"\n dir_probs = self.dir_clf.predict_proba(features)\n mod_probs = self.mod_clf.predict_proba(features)\n return dir_probs.argmax(-1), mod_probs.argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelA\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(dir_clf={self.dir_clf}, \"\n f\"use_augmentation={self.use_augmentation}, \"\n f\"n_aug_per_class={self.n_aug_per_class}, \"\n + f\"mod_clf={self.mod_clf}, \"\n + f\"include_rest_data_for_clf={self.include_rest_data_for_clf})\"\n )"
},
{
"identifier": "ParallelB",
"path": "multi_label_emg/models.py",
"snippet": "class ParallelB(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelB.pkl\"\n\n def __init__(\n self,\n dir_clf,\n mod_clf,\n has_dir_clf,\n has_mod_clf,\n use_augmentation: bool,\n n_aug_per_class: int = -1,\n ):\n self.has_dir_clf = has_dir_clf\n self.has_mod_clf = has_mod_clf\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n self.use_augmentation = use_augmentation\n self.n_aug_per_class = n_aug_per_class\n self._n_aug_created = None\n\n def get_params(self, deep=True):\n return {\n \"dir_clf\": self.dir_clf,\n \"mod_clf\": self.mod_clf,\n \"has_dir_clf\": self.dir_clf,\n \"has_mod_clf\": self.mod_clf,\n \"use_augmentation\": self.use_augmentation,\n \"n_aug_per_class\": self.n_aug_per_class,\n }\n\n def fit(self, features, y_dir, y_mod):\n if self.use_augmentation:\n aug = AvgPairs(self.n_aug_per_class)\n aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)\n features = np.concatenate([features, aug_features])\n y_dir = np.concatenate([y_dir, aug_dir_labels])\n y_mod = np.concatenate([y_mod, aug_mod_labels])\n self._n_aug_created = len(aug_features)\n\n if y_dir.ndim == 2:\n y_dir = y_dir.argmax(-1)\n if y_mod.ndim == 2:\n y_mod = y_mod.argmax(-1)\n has_direction = y_dir != NO_DIR_IDX\n has_modifier = y_mod != NO_MOD_IDX\n # Event check\n self.has_dir_clf.fit(features, has_direction.astype(int))\n self.has_mod_clf.fit(features, has_modifier.astype(int))\n # Direction and modifier\n self.dir_clf.fit(features[has_direction], y_dir[has_direction])\n self.mod_clf.fit(features[has_modifier], y_mod[has_modifier])\n return self\n\n def predict_proba(self, features):\n p_has_direction = self.has_dir_clf.predict_proba(features)\n p_has_modifier = self.has_mod_clf.predict_proba(features)\n\n p_dir_probs = self.dir_clf.predict_proba(features)\n p_mod_probs = self.mod_clf.predict_proba(features)\n\n # Check probs\n dir_probs = np.zeros((features.shape[0], 5))\n mod_probs = np.zeros((features.shape[0], 3))\n dir_probs[:, NO_DIR_IDX] = p_has_direction[:, 0] # p(no_direction | x)\n mod_probs[:, NO_MOD_IDX] = p_has_modifier[:, 0] # p(no_modifier | x)\n dir_probs[:, :NO_DIR_IDX] = np.multiply(\n p_dir_probs, p_has_direction[:, 1][..., None]\n ) # p(direction | has_direction)\n mod_probs[:, :NO_MOD_IDX] = np.multiply(\n p_mod_probs, p_has_modifier[:, 1][..., None]\n ) # p(modifier | has_modifier)\n assert np.allclose(dir_probs.sum(-1), 1) and np.allclose(mod_probs.sum(-1), 1), \"Probabilities should sum to 1\"\n # return probs\n \"\"\"Only for gestures\"\"\"\n return dir_probs, mod_probs\n\n def predict(self, features):\n dir_probs, mod_probs = self.predict_proba(features)\n return dir_probs.argmax(-1), mod_probs.argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelB\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(has_dir_clf={self.has_dir_clf}, \"\n f\"dir_clf={self.dir_clf}, \"\n f\"use_augmentation={self.use_augmentation}, \"\n f\"n_aug_per_class={self.n_aug_per_class}, \"\n f\"has_mod_clf={self.has_mod_clf}),\"\n f\"mod_clf={self.mod_clf})\"\n )"
},
{
"identifier": "NO_DIR_IDX",
"path": "multi_label_emg/utils.py",
"snippet": "NO_DIR_IDX = len(DIRECTION_GESTURES) # When predicting direction, we have an extra class representing \"None\""
},
{
"identifier": "NO_MOD_IDX",
"path": "multi_label_emg/utils.py",
"snippet": "NO_MOD_IDX = len(MODIFIER_GESTURES)"
},
{
"identifier": "RESULTS_DIR",
"path": "multi_label_emg/utils.py",
"snippet": "RESULTS_DIR = PROJECT_ROOT.parent / \"results\" # For experiment outputs and figures"
},
{
"identifier": "canonical_coords",
"path": "multi_label_emg/utils.py",
"snippet": "def canonical_coords():\n \"\"\"NOTE - order does not matter: (Up, Pinch) and (Pinch, Up) are both labeled as (Up, Pinch)\n Make a list table so we can convert:\n from integer labels such as (0, 1),\n to an index in confusion matrix and a string label\"\"\"\n result_int = []\n result_str = []\n\n # Add (<DIR>, NoMod) items\n for i, d in enumerate(DIRECTION_GESTURES):\n result_int.append((i, NO_MOD_IDX))\n result_str.append(f\"({d}, NoMod)\")\n\n # Add (NoDir, <MOD>) items\n for i, m in enumerate(MODIFIER_GESTURES):\n result_int.append((NO_DIR_IDX, i))\n result_str.append(f\"(NoDir, {m})\")\n\n # Add (<DIR>, <MOD>) items\n for i, d in enumerate(DIRECTION_GESTURES):\n for j, m in enumerate(MODIFIER_GESTURES):\n result_int.append((i, j))\n result_str.append(f\"({d}, {m})\")\n\n # Add the (NoDir, NoMod) item\n result_int.append((NO_DIR_IDX, NO_MOD_IDX))\n result_str.append(\"(NoDir, NoMod)\")\n\n return result_int, result_str"
},
{
"identifier": "confusion_matrix",
"path": "multi_label_emg/utils.py",
"snippet": "def confusion_matrix(y_true_2d, y_pred_2d, normalize_rows=True):\n \"\"\"\n Number of classes = 4 direction + 2 modifier + 4*2 combinations + (NoDir, NoMod) = 15\n Create a confusion matrix of shape (15, 15), arranged according to the canonical\n coordinates above\n\n NOTE - result may contain nans - use nanmean later\n \"\"\"\n coords, coords_str = canonical_coords()\n\n cm = np.zeros((len(coords), len(coords)), dtype=int)\n for yt, yp in zip(y_true_2d, y_pred_2d):\n cm[coords.index(tuple(yt)), coords.index(tuple(yp))] += 1\n if normalize_rows:\n cm = cm.astype(float)\n with np.errstate(all=\"ignore\"): # Ignore division by zero for empty rows\n cm /= cm.sum(axis=-1, keepdims=True)\n return cm"
},
{
"identifier": "str2bool",
"path": "multi_label_emg/utils.py",
"snippet": "def str2bool(s):\n if s.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif s.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise ValueError(\"Boolean value expected.\")"
}
] | import sys
import numpy as np
import plotly.graph_objects as go
import argparse
from loguru import logger
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity, KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.svm import SVC
from multi_label_emg.data import load_data_dict
from multi_label_emg.models import AvgPairs, ElementwiseMaxPairs, ParallelA, ParallelB
from multi_label_emg.utils import (
NO_DIR_IDX,
NO_MOD_IDX,
RESULTS_DIR,
canonical_coords,
confusion_matrix,
str2bool,
) | 7,889 |
# NOTE - we use HoldPulse1_NoFeedback and SimultaneousPulse1_NoFeedback for train set in the "upper bound"
# otherwise, these blocks are not used
# Load test data
if include_doubles_in_train:
# We use blocks 1 and 2 of the "NoFeedBack" portion of experiment
# Double check that we're not using augmentation
assert doubles_method == "none"
assert singles_method == "none"
# Add real combos to train set
train_features = np.concatenate(
[
train_features,
data["HoldPulse1_NoFeedBack_features"],
data["SimultaneousPulse1_NoFeedBack_features"],
data["HoldPulse2_NoFeedBack_features"],
data["SimultaneousPulse2_NoFeedBack_features"],
]
)
train_dir_labels = np.concatenate(
[
train_dir_labels,
data["HoldPulse1_NoFeedBack_dir_labels"],
data["SimultaneousPulse1_NoFeedBack_dir_labels"],
data["HoldPulse2_NoFeedBack_dir_labels"],
data["SimultaneousPulse2_NoFeedBack_dir_labels"],
]
)
train_mod_labels = np.concatenate(
[
train_mod_labels,
data["HoldPulse1_NoFeedBack_mod_labels"],
data["SimultaneousPulse1_NoFeedBack_mod_labels"],
data["HoldPulse2_NoFeedBack_mod_labels"],
data["SimultaneousPulse2_NoFeedBack_mod_labels"],
]
)
logger.info(f"Initial train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Don't use "Feedback" blocks for this analysis
test_blocks = ["HoldPulse3_NoFeedBack", "SimultaneousPulse3_NoFeedBack"]
test_features = np.concatenate([data[f"{block}_features"] for block in test_blocks])
test_dir_labels = np.concatenate([data[f"{block}_dir_labels"] for block in test_blocks])
test_mod_labels = np.concatenate([data[f"{block}_mod_labels"] for block in test_blocks])
logger.info(f"test set: {test_features.shape=}, {test_dir_labels.shape=}, {test_mod_labels.shape=}")
# Vary strategy for augmented doubles
double_features_aug, double_dir_labels_aug, double_mod_labels_aug = get_augmented_doubles(
doubles_method,
feature_combine_type,
fraction_doubles_per_class,
train_features,
train_dir_labels,
train_mod_labels,
)
# Make augmented singles
# Figure out how many doubles per class. Take avg and then apply rel_fraction_singles_per_class to
# get the number of singles per class
n_singles_per_class = 0
if singles_method != "none":
doubles_labels_2d = np.stack((double_dir_labels_aug.argmax(-1), double_mod_labels_aug.argmax(-1)), axis=-1)
class_sizes = np.unique(doubles_labels_2d, axis=0, return_counts=True)[-1]
n_singles_per_class = int(np.round(np.mean(class_sizes) * rel_fraction_singles_per_class))
single_features_aug, single_dir_labels_aug, single_mod_labels_aug = get_augmented_singles(
singles_method, n_singles_per_class, train_features, train_dir_labels, train_mod_labels
)
# Merge all train data
train_features = np.concatenate([train_features, double_features_aug, single_features_aug])
train_dir_labels = np.concatenate([train_dir_labels, double_dir_labels_aug, single_dir_labels_aug])
train_mod_labels = np.concatenate([train_mod_labels, double_mod_labels_aug, single_mod_labels_aug])
logger.info(f"Augmented train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Create model
if parallel_model_type == "ParallelA":
model = ParallelA(
get_clf(clf_name, num_classes=5),
get_clf(clf_name, num_classes=3),
use_augmentation=False,
include_rest_data_for_clf=True,
)
elif parallel_model_type == "ParallelB":
model = ParallelB(
dir_clf=get_clf(clf_name, num_classes=4),
mod_clf=get_clf(clf_name, num_classes=2),
has_dir_clf=get_clf(clf_name, num_classes=2),
has_mod_clf=get_clf(clf_name, num_classes=2),
use_augmentation=False,
# include_rest_data_for_clf=True, # NOTE - always using true, flag is not in model
)
elif parallel_model_type == "SerialControl":
model = get_clf(clf_name, num_classes=15)
else:
raise ValueError(f"Unknown parallel model type: {parallel_model_type}")
# Train
logger.info("Train...")
if parallel_model_type == "SerialControl":
# Convert labels to integer by making 2-digit numbers,
# where the 10s place is the dir label and the 1s place is the mod label
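        # e.g. (dir=3, mod=1) -> 31; the "Rest" class (NO_DIR_IDX=4, NO_MOD_IDX=2) -> 42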
train_labels = train_dir_labels.argmax(-1) * 10 + train_mod_labels.argmax(-1)
model.fit(train_features, train_labels)
else:
model.fit(train_features, train_dir_labels, train_mod_labels)
# Evaluate
logger.info("Evaluate")
if parallel_model_type == "SerialControl":
combined_preds = model.predict(test_features)
dir_preds = combined_preds // 10
mod_preds = combined_preds % 10
else:
dir_preds, mod_preds = model.predict(test_features)
preds_2d = np.stack([dir_preds, mod_preds], axis=-1)
true_labels_2d = np.stack([test_dir_labels.argmax(-1), test_mod_labels.argmax(-1)], axis=-1)
|
def get_name(
subject: str,
seed: int,
parallel_model_type: str,
clf_name: str,
doubles_method: str,
fraction_doubles_per_class: float,
singles_method: str,
rel_fraction_singles_per_class: float,
include_doubles_in_train: bool,
feature_combine_type: str,
):
return "__".join(
[
f"subj={subject}",
f"seed={seed}",
f"par={parallel_model_type}",
f"clf={clf_name}",
f"doubles={doubles_method}",
f"frac_doubles={fraction_doubles_per_class}",
f"singles={singles_method}",
f"frac_singles={rel_fraction_singles_per_class}",
f"incl_doubles={include_doubles_in_train}",
f"feat_type={feature_combine_type}",
]
)
def plot_confusion_matrix(data: np.ndarray):
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
coords, coords_str = canonical_coords()
text = make_text(data)
fig = go.Figure()
fig.update_layout(
# margin=margin,
xaxis=dict(
title="Predicted",
tickangle=-45,
tickmode="array",
ticktext=coords_str,
tickvals=list(range(len(coords_str))),
constrain="domain",
),
yaxis=dict(
title="Actual",
tickmode="array",
ticktext=coords_str,
tickvals=list(range(len(coords_str))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
)
fig.add_trace(
go.Heatmap(z=data, text=text, texttemplate="%{text}", zmin=0, zmax=1, colorscale="Blues", showscale=False)
)
return fig
def subset_doubles_uniform(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, take n_per_class items uniformly at random"""
res_x, res_y_dir, res_y_mod = [], [], []
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
subset_idx = np.random.choice(idx, size=n_per_class, replace=False)
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_near_mean(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, take n_per_class items closest to the mean of these synthetic items"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
# Subset each class by taking items closest to mean
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
k_smallest_idx = np.argpartition(dists, n_per_class)[:n_per_class]
subset_idx = idx[k_smallest_idx]
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_spaced_quantiles(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, rank items by their distance to the class mean,
and take items with ranks 1, K+1, 2K+1.
The spacing K will be approx (class_size / n_per_class)
"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
    # Subset each class by taking evenly spaced items from the distance-to-mean ranking
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_dir_mod(
method: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray
):
# Should have 1-hot vector labels
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
# Figure out how many items we have per class
# Then use fraction_doubles_per_class to figure out how many doubles we want
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes)))
n_per_class = min(n_per_class, np.min(class_sizes))
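    # Cap at the smallest class size so that sampling without replacement below never over-draws a class.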
logger.info(f"Initial class sizes: {class_sizes}, n_per_class: {n_per_class}")
    # For each class, select n_per_class items according to the chosen subsetting method
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_mean = np.mean(features[idx], axis=0)
if method == "subsetInput_uniform":
subset_idx = np.random.choice(idx, n_per_class, replace=False)
elif method == "subsetInput_near_mean":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
subset_idx = idx[ranked_distances[:n_per_class]]
elif method == "subsetInput_spaced_quantiles":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features[subset_idx])
res_y_dir.append(dir_labels[subset_idx])
res_y_mod.append(mod_labels[subset_idx])
res_x = np.concatenate(res_x)
res_y_dir = np.concatenate(res_y_dir)
res_y_mod = np.concatenate(res_y_mod)
labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Class sizes after subset: {class_sizes}")
return res_x, res_y_dir, res_y_mod
def get_augmented_doubles(
method: str,
feature_combine_type: str,
fraction_doubles_per_class: float,
features: np.ndarray,
dir_labels: np.ndarray,
mod_labels: np.ndarray,
):
if feature_combine_type == "avg":
aug = AvgPairs(-1)
elif feature_combine_type == "max":
aug = ElementwiseMaxPairs(-1)
else:
raise ValueError(f"Unknown feature_combine_type: {feature_combine_type}")
if method == "none":
logger.info("No synthetic doubles")
# We create nothing and return early
features_aug = np.empty((0, *features.shape[1:]))
dir_labels_aug = np.empty((0, *dir_labels.shape[1:]))
mod_labels_aug = np.empty((0, *mod_labels.shape[1:]))
return features_aug, dir_labels_aug, mod_labels_aug
if method.startswith("subsetInput"):
# NOTE - here, n_per_class means how many items in each INPUT class
# Do the subsetting before making combinations
logger.info("Subset before creating doubles")
features_subset, dir_labels_subset, mod_labels_subset = subset_dir_mod(
method, fraction_doubles_per_class, features, dir_labels, mod_labels
)
features_aug, dir_labels_aug, mod_labels_aug = aug(features_subset, dir_labels_subset, mod_labels_subset)
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Final synthetic double class sizes: {class_sizes}")
return features_aug, dir_labels_aug, mod_labels_aug
# Other methods create all combinations and THEN subset
# First, create all augmented items
logger.info("Subset after creating doubles")
features_aug, dir_labels_aug, mod_labels_aug = aug(features, dir_labels, mod_labels)
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Initial synthetic double class sizes: {class_sizes}")
# check these are all doubles
items_with_dir = dir_labels_aug.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels_aug.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == len(features_aug)
# Figure out how many items we want per class
n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes)))
n_per_class = min(n_per_class, np.min(class_sizes))
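    # The requested fraction is interpreted relative to the mean synthetic-double class size,
    # then capped at the smallest class so every subsetting strategy can honor it.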
# Then, subset as requested
if method == "all":
pass
elif method == "subset_uniform":
features_aug, dir_labels_aug, mod_labels_aug = subset_doubles_uniform(
n_per_class, features_aug, dir_labels_aug, mod_labels_aug
)
elif method == "subset_near_mean":
features_aug, dir_labels_aug, mod_labels_aug = subset_doubles_near_mean(
n_per_class, features_aug, dir_labels_aug, mod_labels_aug
)
elif method == "subset_spaced_quantiles":
features_aug, dir_labels_aug, mod_labels_aug = subset_doubles_spaced_quantiles(
n_per_class, features_aug, dir_labels_aug, mod_labels_aug
)
else:
raise ValueError(f"Unknown augmentation method: {method}")
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Final synthetic double class sizes: {class_sizes}")
return features_aug, dir_labels_aug, mod_labels_aug
def get_noise_simple(x, relative_std):
"""Add noise to x, where the noise standard deviation is relative_std * x.std()"""
return np.random.randn(*x.shape) * relative_std * x.std(0)
def balanced_sample_singles(features, dir_labels, mod_labels, n_per_class):
# Should have 1-hot vector labels
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
n_needed = n_per_class
selected_idx = []
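        # Oversample with repetition if the class has fewer than n_per_class items:
        # take the whole class repeatedly, then draw the remainder without replacement.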
while True:
if n_needed >= len(idx):
# Take all items in this class 1 more time
selected_idx.append(idx)
n_needed -= len(idx)
else:
# Take the remaining items randomly
selected_idx.append(np.random.choice(idx, n_needed, replace=False))
break
selected_idx = np.concatenate(selected_idx)
res_x.append(features[selected_idx])
res_y_dir.append(dir_labels[selected_idx])
res_y_mod.append(mod_labels[selected_idx])
return np.concatenate(res_x), np.concatenate(res_y_dir), np.concatenate(res_y_mod)
def sample_singles_gmm(features, dir_labels, mod_labels, n_per_class, n_components):
"""Fit a GMM to each class, then sample as requested"""
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
    # For each class, fit a Gaussian mixture model and sample the requested number of points
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
# NOTE - d and m are now integer values. We need to convert them to 1-hot vectors for the output
d_onehot = np.zeros(dir_labels.shape[1])
d_onehot[d] = 1
m_onehot = np.zeros(mod_labels.shape[1])
m_onehot[m] = 1
idx = np.where((labels_2d == (d, m)).all(-1))[0]
gmm = GaussianMixture(n_components=n_components)
gmm.fit(features[idx])
res_x.append(gmm.sample(n_per_class)[0])
res_y_dir.append(np.tile(d_onehot, (n_per_class, 1)))
res_y_mod.append(np.tile(m_onehot, (n_per_class, 1)))
return np.concatenate(res_x), np.concatenate(res_y_dir), np.concatenate(res_y_mod)
def sample_singles_kde(features, dir_labels, mod_labels, n_per_class, bandwidth):
"""Fit a GMM to each class, then sample as requested"""
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
    # For each class, fit a kernel density estimate and sample the requested number of points
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
# NOTE - d and m are now integer values. We need to convert them to 1-hot vectors for the output
d_onehot = np.zeros(dir_labels.shape[1])
d_onehot[d] = 1
m_onehot = np.zeros(mod_labels.shape[1])
m_onehot[m] = 1
idx = np.where((labels_2d == (d, m)).all(-1))[0]
kde = KernelDensity(bandwidth=bandwidth)
kde.fit(features[idx])
res_x.append(kde.sample(n_per_class))
res_y_dir.append(np.tile(d_onehot, (n_per_class, 1)))
res_y_mod.append(np.tile(m_onehot, (n_per_class, 1)))
return np.concatenate(res_x), np.concatenate(res_y_dir), np.concatenate(res_y_mod)
def get_augmented_singles(
method: str, n_per_class: int, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray
):
if method == "none":
logger.info("No augmented singles")
# Return empties so we can just concatenate and not worry about it
features_aug = np.empty((0, *features.shape[1:]))
dir_labels_aug = np.empty((0, *dir_labels.shape[1:]))
mod_labels_aug = np.empty((0, *mod_labels.shape[1:]))
return features_aug, dir_labels_aug, mod_labels_aug
logger.info(f"Augmenting singles with method {method}")
if method.startswith("add-gaussian"):
# First, choose a subset of items according to n_per_class
features, dir_labels_aug, mod_labels_aug = balanced_sample_singles(
features, dir_labels, mod_labels, n_per_class
)
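        # NOTE: `features` is re-bound to the balanced subset here, so the noise below is added
        # to the sampled items and the returned dir/mod labels already correspond to that subset.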
if method == "add-gaussian-0.05":
factor = 0.05
elif method == "add-gaussian-0.1":
factor = 0.1
elif method == "add-gaussian-0.2":
factor = 0.2
elif method == "add-gaussian-0.3":
factor = 0.3
elif method == "add-gaussian-0.4":
factor = 0.4
elif method == "add-gaussian-0.5":
factor = 0.5
elif method == "add-gaussian-0.6":
factor = 0.6
else:
raise ValueError(f"Unknown gaussian factor: {method}")
features_aug = features + get_noise_simple(features, factor)
elif method.startswith("fit-gmm"):
if method == "fit-gmm-1":
nc = 1
elif method == "fit-gmm-3":
nc = 3
elif method == "fit-gmm-5":
nc = 5
elif method == "fit-gmm-10":
            nc = 10
        else:
            raise ValueError(f"Unknown GMM method: {method}")
features_aug, dir_labels_aug, mod_labels_aug = sample_singles_gmm(
features, dir_labels, mod_labels, n_per_class, n_components=nc
)
elif method.startswith("fit-kde"):
if method == "fit-kde-gaussian-scott":
bandwidth = "scott"
if method == "fit-kde-gaussian-silverman":
bandwidth = "silverman"
if method == "fit-kde-gaussian-0.01":
bandwidth = 0.01
if method == "fit-kde-gaussian-0.1":
bandwidth = 0.1
if method == "fit-kde-gaussian-1.0":
bandwidth = 1.0
if method == "fit-kde-gaussian-10.0":
bandwidth = 10.0
features_aug, dir_labels_aug, mod_labels_aug = sample_singles_kde(
features, dir_labels, mod_labels, n_per_class, bandwidth=bandwidth
)
else:
raise NotImplementedError()
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Augmented singles class sizes: {class_sizes}")
return features_aug, dir_labels_aug, mod_labels_aug
def get_clf(name: str, num_classes: int):
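    # NOTE: num_classes is accepted for call-site symmetry but is not used here;
    # the scikit-learn classifiers infer the set of classes from the labels passed to fit().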
if name == "mlp":
return make_pipeline(
RobustScaler(), MLPClassifier(hidden_layer_sizes=[100, 100, 100], early_stopping=True, max_iter=200)
)
elif name == "logr":
return make_pipeline(RobustScaler(), LogisticRegression(class_weight="balanced", max_iter=2000, n_jobs=-1))
elif name == "svc":
return make_pipeline(RobustScaler(), SVC(class_weight="balanced", probability=True))
elif name == "rf":
return make_pipeline(RobustScaler(), RandomForestClassifier(class_weight="balanced", n_jobs=-1))
elif name == "knn":
return make_pipeline(RobustScaler(), KNeighborsClassifier())
elif name == "lda":
return make_pipeline(RobustScaler(), LinearDiscriminantAnalysis())
elif name == "gbc":
return make_pipeline(RobustScaler(), GradientBoostingClassifier())
else:
raise ValueError(f"Unknown model name: {name}")
def balance_classes(train_features, train_dir_labels, train_mod_labels):
# Subsample the "Rest" class since it will be overrepresented
assert train_dir_labels.ndim == 2
assert train_mod_labels.ndim == 2
labels_2d = np.stack([train_dir_labels.argmax(-1), train_mod_labels.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Before pruning 'Rest' items, class sizes: {class_sizes}")
rest_idx = np.where((labels_2d == [NO_DIR_IDX, NO_MOD_IDX]).all(-1))[0]
active_idx = np.where((labels_2d != [NO_DIR_IDX, NO_MOD_IDX]).any(-1))[0]
active_counts = np.unique(labels_2d[active_idx], axis=0, return_counts=True)[-1]
avg_n_active = int(np.mean(active_counts))
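    # Downsample "Rest" to roughly the average size of the active (non-Rest) classes.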
subset_rest_idx = np.random.choice(rest_idx, avg_n_active, replace=False)
res_x = np.concatenate((train_features[active_idx], train_features[subset_rest_idx]))
res_y_dir = np.concatenate((train_dir_labels[active_idx], train_dir_labels[subset_rest_idx]))
res_y_mod = np.concatenate((train_mod_labels[active_idx], train_mod_labels[subset_rest_idx]))
res_labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1)
res_class_sizes = np.unique(res_labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"After pruning 'Rest' items, class sizes: {res_class_sizes}")
return res_x, res_y_dir, res_y_mod
def remove_double_gestures(train_features, train_dir_labels, train_mod_labels):
labels_2d = np.stack([train_dir_labels.argmax(-1), train_mod_labels.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Before removing double gestures, class sizes: {class_sizes}")
items_with_dir = train_dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = train_mod_labels.argmax(-1) != NO_MOD_IDX
# Remove items with both direction and modifier
singles_idx = ~np.logical_and(items_with_dir, items_with_mod)
res_features = train_features[singles_idx]
res_dir_labels = train_dir_labels[singles_idx]
res_mod_labels = train_mod_labels[singles_idx]
res_labels_2d = np.stack([res_dir_labels.argmax(-1), res_mod_labels.argmax(-1)], axis=-1)
res_class_sizes = np.unique(res_labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"After removing double gestures, class sizes: {res_class_sizes}")
return res_features, res_dir_labels, res_mod_labels
@logger.catch(onerror=lambda _: sys.exit(1))
def run_training(
subject: str,
parallel_model_type: str,
clf_name: str,
doubles_method: str,
fraction_doubles_per_class: float,
singles_method: str,
rel_fraction_singles_per_class: float,
include_doubles_in_train: bool,
feature_combine_type: str,
):
# We don't want to modify code in the gest module itself.
# Thus, we'll do augmentation manually here, and tell the model not to do
# any further augmentation.
# Load train data
data_dict = load_data_dict()
try:
data = data_dict[subject]
except KeyError:
raise ValueError(f"Unknown subject: {subject}")
train_features = data["Calibration_features"]
train_dir_labels = data["Calibration_dir_labels"]
train_mod_labels = data["Calibration_mod_labels"]
# First, reduce amount of "Rest" items in train set
train_features, train_dir_labels, train_mod_labels = balance_classes(
train_features, train_dir_labels, train_mod_labels
)
# Remove any double gestures that occured due to bad participant behavior
train_features, train_dir_labels, train_mod_labels = remove_double_gestures(
train_features, train_dir_labels, train_mod_labels
)
# NOTE - we use HoldPulse1_NoFeedback and SimultaneousPulse1_NoFeedback for train set in the "upper bound"
# otherwise, these blocks are not used
# Load test data
if include_doubles_in_train:
# We use blocks 1 and 2 of the "NoFeedBack" portion of experiment
# Double check that we're not using augmentation
assert doubles_method == "none"
assert singles_method == "none"
# Add real combos to train set
train_features = np.concatenate(
[
train_features,
data["HoldPulse1_NoFeedBack_features"],
data["SimultaneousPulse1_NoFeedBack_features"],
data["HoldPulse2_NoFeedBack_features"],
data["SimultaneousPulse2_NoFeedBack_features"],
]
)
train_dir_labels = np.concatenate(
[
train_dir_labels,
data["HoldPulse1_NoFeedBack_dir_labels"],
data["SimultaneousPulse1_NoFeedBack_dir_labels"],
data["HoldPulse2_NoFeedBack_dir_labels"],
data["SimultaneousPulse2_NoFeedBack_dir_labels"],
]
)
train_mod_labels = np.concatenate(
[
train_mod_labels,
data["HoldPulse1_NoFeedBack_mod_labels"],
data["SimultaneousPulse1_NoFeedBack_mod_labels"],
data["HoldPulse2_NoFeedBack_mod_labels"],
data["SimultaneousPulse2_NoFeedBack_mod_labels"],
]
)
logger.info(f"Initial train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Don't use "Feedback" blocks for this analysis
test_blocks = ["HoldPulse3_NoFeedBack", "SimultaneousPulse3_NoFeedBack"]
test_features = np.concatenate([data[f"{block}_features"] for block in test_blocks])
test_dir_labels = np.concatenate([data[f"{block}_dir_labels"] for block in test_blocks])
test_mod_labels = np.concatenate([data[f"{block}_mod_labels"] for block in test_blocks])
logger.info(f"test set: {test_features.shape=}, {test_dir_labels.shape=}, {test_mod_labels.shape=}")
# Vary strategy for augmented doubles
double_features_aug, double_dir_labels_aug, double_mod_labels_aug = get_augmented_doubles(
doubles_method,
feature_combine_type,
fraction_doubles_per_class,
train_features,
train_dir_labels,
train_mod_labels,
)
# Make augmented singles
# Figure out how many doubles per class. Take avg and then apply rel_fraction_singles_per_class to
# get the number of singles per class
n_singles_per_class = 0
if singles_method != "none":
doubles_labels_2d = np.stack((double_dir_labels_aug.argmax(-1), double_mod_labels_aug.argmax(-1)), axis=-1)
class_sizes = np.unique(doubles_labels_2d, axis=0, return_counts=True)[-1]
n_singles_per_class = int(np.round(np.mean(class_sizes) * rel_fraction_singles_per_class))
single_features_aug, single_dir_labels_aug, single_mod_labels_aug = get_augmented_singles(
singles_method, n_singles_per_class, train_features, train_dir_labels, train_mod_labels
)
# Merge all train data
train_features = np.concatenate([train_features, double_features_aug, single_features_aug])
train_dir_labels = np.concatenate([train_dir_labels, double_dir_labels_aug, single_dir_labels_aug])
train_mod_labels = np.concatenate([train_mod_labels, double_mod_labels_aug, single_mod_labels_aug])
logger.info(f"Augmented train set: {train_features.shape=}, {train_dir_labels.shape=}, {train_mod_labels.shape=}")
# Create model
if parallel_model_type == "ParallelA":
model = ParallelA(
get_clf(clf_name, num_classes=5),
get_clf(clf_name, num_classes=3),
use_augmentation=False,
include_rest_data_for_clf=True,
)
elif parallel_model_type == "ParallelB":
model = ParallelB(
dir_clf=get_clf(clf_name, num_classes=4),
mod_clf=get_clf(clf_name, num_classes=2),
has_dir_clf=get_clf(clf_name, num_classes=2),
has_mod_clf=get_clf(clf_name, num_classes=2),
use_augmentation=False,
# include_rest_data_for_clf=True, # NOTE - always using true, flag is not in model
)
elif parallel_model_type == "SerialControl":
model = get_clf(clf_name, num_classes=15)
else:
raise ValueError(f"Unknown parallel model type: {parallel_model_type}")
# Train
logger.info("Train...")
if parallel_model_type == "SerialControl":
# Convert labels to integer by making 2-digit numbers,
# where the 10s place is the dir label and the 1s place is the mod label
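        # e.g. (dir=3, mod=1) -> 31; the "Rest" class (NO_DIR_IDX=4, NO_MOD_IDX=2) -> 42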
train_labels = train_dir_labels.argmax(-1) * 10 + train_mod_labels.argmax(-1)
model.fit(train_features, train_labels)
else:
model.fit(train_features, train_dir_labels, train_mod_labels)
# Evaluate
logger.info("Evaluate")
if parallel_model_type == "SerialControl":
combined_preds = model.predict(test_features)
dir_preds = combined_preds // 10
mod_preds = combined_preds % 10
else:
dir_preds, mod_preds = model.predict(test_features)
preds_2d = np.stack([dir_preds, mod_preds], axis=-1)
true_labels_2d = np.stack([test_dir_labels.argmax(-1), test_mod_labels.argmax(-1)], axis=-1) | return confusion_matrix(true_labels_2d, preds_2d) | 9 | 2023-12-12 16:50:34+00:00 | 12k |
ebb-earl-co/tidal-wave | tidal_wave/playlist.py | [
{
"identifier": "AudioFormat",
"path": "tidal_wave/media.py",
"snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\""
},
{
"identifier": "PlaylistsEndpointResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class PlaylistsEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the (music) video, including video quality, video title, date,\n video artists, duration, etc.\"\"\"\n\n uuid: str = field(repr=False)\n title: str\n number_of_tracks: int\n number_of_videos: int\n description: str\n created: Annotated[datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")]\n type: str\n public_playlist: bool\n url: str\n square_image: str # UUID v4"
},
{
"identifier": "TracksEndpointResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class TracksEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, tracks/{TRACKID} endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the audio track, including audio quality, track title, ISRC,\n track artists, album, track number, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n duration: int # seconds\n replay_gain: float = field(repr=False)\n peak: float = field(repr=False)\n track_number: int\n volume_number: int\n version: Optional[str]\n copyright: str = field(repr=False)\n url: str\n isrc: str = field(repr=False)\n explicit: bool\n audio_quality: str = field(repr=False)\n audio_modes: List[str] = field(repr=False)\n media_metadata: \"MediaMetadata\"\n artist: \"Artist\"\n artists: List[\"Artist\"]\n album: \"TrackAlbum\"\n\n def __post_init__(self):\n name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )\n self.name: str = name if self.version is None else f\"{name} ({self.version})\""
},
{
"identifier": "VideosEndpointResponseJSON",
"path": "tidal_wave/models.py",
"snippet": "class VideosEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the (music) video, including video quality, video title, date,\n video artists, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n volume_number: int\n track_number: int\n release_date: Annotated[\n datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n ]\n duration: int # seconds\n quality: str\n explicit: bool\n type: str\n artist: \"Artist\"\n artists: List[\"Artist\"]\n\n def __post_init__(self):\n self.name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )"
},
{
"identifier": "request_playlists",
"path": "tidal_wave/requesting.py",
"snippet": "def request_playlists(\n session: Session, identifier: int\n) -> Optional[PlaylistsEndpointResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"playlists\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n subclass=PlaylistsEndpointResponseJSON,\n )"
},
{
"identifier": "Track",
"path": "tidal_wave/track.py",
"snippet": "class Track:\n track_id: int\n\n def __post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None"
},
{
"identifier": "download_cover_image",
"path": "tidal_wave/utils.py",
"snippet": "def download_cover_image(\n session: Session,\n cover_uuid: str,\n output_dir: Path,\n file_name: str = \"cover.jpg\",\n dimension: Union[int, Tuple[int]] = 1280,\n) -> Optional[Path]:\n \"\"\"Given a UUID that corresponds to a (JPEG) image on Tidal's servers,\n download the image file and write it as 'cover.jpeg' or 'cover.png'\n in the directory `path_to_output_dir`. Returns path to downloaded file\"\"\"\n cover_url_part: str = cover_uuid.replace(\"-\", \"/\")\n if isinstance(dimension, int):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension}x{dimension}\"\n elif isinstance(dimension, tuple):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension[0]}x{dimension[1]}\"\n\n with session.get(url=_url, headers={\"Accept\": \"image/jpeg\"}) as r:\n if not r.ok:\n logger.warning(\n \"Could not retrieve data from Tidal resources/images URL \"\n f\"due to error code: {r.status_code}\"\n )\n logger.debug(r.reason)\n return\n else:\n bytes_to_write = BytesIO(r.content)\n\n if bytes_to_write is not None:\n output_file: Path = output_dir / file_name\n bytes_to_write.seek(0)\n output_file.write_bytes(bytes_to_write.read())\n bytes_to_write.close()\n return output_file"
},
{
"identifier": "temporary_file",
"path": "tidal_wave/utils.py",
"snippet": "@contextmanager\ndef temporary_file(suffix: str = \".mka\"):\n \"\"\"This context-managed function is a stand-in for\n tempfile.NamedTemporaryFile as that stdlib object experiences\n errors on Windows.\"\"\"\n file_name: str = os.path.join(\n tempfile.gettempdir(), f\"{os.urandom(24).hex()}{suffix}\"\n )\n if not os.path.exists(file_name):\n open(file=file_name, mode=\"x\").close()\n\n tf = open(file=file_name, mode=\"wb\")\n try:\n yield tf\n finally:\n tf.close()\n os.unlink(tf.name)"
},
{
"identifier": "TIDAL_API_URL",
"path": "tidal_wave/utils.py",
"snippet": "TIDAL_API_URL: str = \"https://api.tidal.com/v1\""
},
{
"identifier": "Video",
"path": "tidal_wave/video.py",
"snippet": "class Video:\n video_id: int\n\n def __post_init__(self):\n self.tags: dict = {}\n self.codec: str = \"mp4\"\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /videos endpoint\"\"\"\n self.metadata: Optional[VideosEndpointResponseJSON] = request_videos(\n session, self.video_id\n )\n\n def get_contributors(self, session: Session):\n \"\"\"Request from TIDAL API /videos/contributors endpoint\"\"\"\n self.contributors: Optional[\n VideosContributorsResponseJSON\n ] = request_video_contributors(session, self.video_id)\n\n def get_stream(self, session: Session, video_format=VideoFormat.high):\n \"\"\"Populates self.stream by requesting from TIDAL API\n /videos/playbackinfopostpaywall endpoint\"\"\"\n self.stream: Optional[VideosEndpointStreamResponseJSON] = request_video_stream(\n session, self.video_id, video_format.value\n )\n\n def get_m3u8(self, session: Session):\n \"\"\"This method sets self.m3u8, an m3u8.M3U8 object\n following the HTTP Live Streaming specification; parsed from\n self.stream. I.e., self.get_stream() needs to have been executed\n before calling this method. N.b. self.m3u8 almost certainly will\n be a multivariant playlist, meaning further processing of its\n contents will be necessary.\"\"\"\n self.m3u8: m3u8.Playlist = playlister(session=session, vesrj=self.stream)\n\n def set_urls(self):\n \"\"\"This method uses self.m3u8, an m3u8.M3U8 object that is variant:\n (https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)\n It retrieves the highest-quality .m3u8 in its .playlists attribute,\n and sets self.urls as the list of strings from that m3u8.Playlist\"\"\"\n # for now, just get the highest-bandwidth playlist\n playlist: m3u8.Playlist = variant_streams(self.m3u8)\n self.M3U8 = m3u8.load(playlist.uri)\n if self.M3U8 is None or len(self.M3U8.files) == 0:\n raise TidalM3U8Exception(\n f\"HLS media segments are not available for video {self.video_id}\"\n )\n self.urls: List[str] = self.M3U8.files\n\n def set_artist_dir(self, out_dir: Path):\n \"\"\"Set self.artist_dir, which is the subdirectory of `out_dir`\n with name `self.metadata.artist.name`\"\"\"\n self.artist_dir: Path = out_dir / self.metadata.artist.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, out_dir: Path):\n \"\"\"Set self.filename, which is constructed from self.metadata.name\n and self.stream.video_quality\"\"\"\n self.filename: str = (\n f\"{self.metadata.name} [{self.stream.video_quality}].{self.codec}\"\n )\n\n def set_outfile(self):\n \"\"\"Uses self.artist_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n self.outfile: Path = self.artist_dir / self.filename\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Video {str(self.outfile.absolute())} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"Requests the HLS video files that constitute self.video_id.\n Writes HLS bytes to a temporary file, then uses FFmpeg to write the\n video data to self.outfile\"\"\"\n if session.session_id is not None:\n download_headers: Dict[str, str] = {\"sessionId\": session.session_id}\n else:\n download_headers: dict = dict()\n download_params: Dict[str, None] = {k: None for k in session.params}\n # self.outfile should 
already have been set by self.set_outfile()\n logger.info(\n f\"Writing video {self.video_id} to '{str(self.outfile.absolute())}'\"\n )\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=download_headers, params=download_params\n ) as download_response:\n if not download_response.ok:\n logger.warning(f\"Could not download {self}\")\n else:\n ntf.write(download_response.content)\n else:\n ntf.seek(0)\n\n # will always be .mp4 because HLS\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n str(self.outfile.absolute()),\n vcodec=\"copy\",\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n logger.info(\n f\"Video {self.video_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary, write the correct values of\n various metadata tags to the file. Videos are .mp4\"\"\"\n tags = dict()\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"comment\"]] = f\"https://tidal.com/browse/video/{self.video_id}\"\n tags[tag_map[\"date\"]] = str(self.metadata.release_date.date())\n tags[tag_map[\"title\"]] = self.metadata.title\n\n for tag in {\"composer\", \"director\", \"lyricist\", \"producer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.contributors, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n self.mutagen.save()\n\n def get(\n self,\n session: Session,\n out_dir: Path,\n metadata: Optional[\"VideosEndpointResponseJSON\"] = None,\n ) -> Optional[str]:\n \"\"\"The main method of this class. Executes a number of other methods\n in a row:\n - self.get_metadata()\n - self.get_contributors()\n - self.get_stream()\n - self.get_m3u8()\n - self.set_urls()\n - self.set_artist_dir()\n - self.set_filename()\n - self.set_outfile()\n - self.download()\n - self.craft_tags()\n - self.set_tags()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n return None\n\n self.get_contributors(session)\n self.get_stream(session)\n if self.stream is None:\n return None\n self.get_m3u8(session)\n self.set_urls()\n self.set_artist_dir(out_dir)\n self.set_filename(out_dir)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return None\n\n if self.download(session, out_dir) is None:\n return None\n\n self.craft_tags()\n self.set_tags()\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n json.dump({self.metadata.title: str(self.outfile.absolute())}, fp)\n\n def dumps(self) -> str:\n return json.dumps({self.metadata.title: str(self.outfile.absolute())})"
}
] | from dataclasses import dataclass
from pathlib import Path
from types import SimpleNamespace
from typing import Dict, List, Optional, Set, Tuple, Union
from requests import HTTPError, Session
from .media import AudioFormat
from .models import (
PlaylistsEndpointResponseJSON,
TracksEndpointResponseJSON,
VideosEndpointResponseJSON,
)
from .requesting import request_playlists
from .track import Track
from .utils import download_cover_image, temporary_file, TIDAL_API_URL
from .video import Video
import json
import logging
import math
import shutil
import sys
import ffmpeg
import mutagen | 9,893 |
logger = logging.getLogger("__name__")
@dataclass
class Playlist:
playlist_id: str # UUID4
def __post_init__(self):
self.playlist_dir: Optional[Path] = None
self.playlist_cover_saved: bool = False
def get_metadata(self, session: Session):
"""Request from TIDAL API /playlists endpoint"""
self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists(
session=session, identifier=self.playlist_id
)
if self.metadata is None:
return
self.name = (
self.metadata.title.replace("/", "_")
.replace("|", "_")
.replace(":", " -")
.replace('"', "")
.replace("..", "")
)
def set_items(self, session: Session):
"""Uses data from TIDAL API /playlists/items endpoint to
populate self.items"""
playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist(
session=session, playlist_id=self.playlist_id
)
if playlist_items is None:
self.items = tuple()
else:
self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items)
def set_dir(self, out_dir: Path):
"""Populates self.playlist_dir based on self.name, self.playlist_id"""
playlist_substring: str = f"{self.name} [{self.playlist_id}]"
self.playlist_dir: Path = out_dir / "Playlists" / playlist_substring
self.playlist_dir.mkdir(parents=True, exist_ok=True)
def save_cover_image(self, session: Session, out_dir: Path):
"""Requests self.metadata.image and attempts to write it to disk"""
if self.playlist_dir is None:
self.set_dir(out_dir=out_dir)
self.cover_path: Path = self.playlist_dir / "cover.jpg"
if not self.cover_path.exists():
download_cover_image(
session=session,
cover_uuid=self.metadata.square_image,
output_dir=self.playlist_dir,
dimension=1080,
)
else:
self.playlist_cover_saved = True
def save_description(self):
"""Requests self.metadata.description and attempts to write it to disk"""
description_path: Path = self.playlist_dir / "PlaylistDescription.txt"
if self.metadata.description is not None and len(self.metadata.description) > 0:
if not description_path.exists():
description_path.write_text(f"{self.metadata.description}\n")
def get_items(self, session: Session, audio_format: AudioFormat):
"""Using either Track.get() or Video.get(), attempt to request
the data for each track or video in self.items"""
if len(self.items) == 0:
return
tracks_videos: list = [None] * len(self.items)
for i, item in enumerate(self.items):
if item is None:
tracks_videos[i] = None
continue
elif isinstance(item, TracksEndpointResponseJSON):
track: Track = Track(track_id=item.id)
track.get(
session=session,
audio_format=audio_format,
out_dir=self.playlist_dir,
metadata=item,
)
tracks_videos[i] = track
|
logger = logging.getLogger("__name__")
@dataclass
class Playlist:
playlist_id: str # UUID4
def __post_init__(self):
self.playlist_dir: Optional[Path] = None
self.playlist_cover_saved: bool = False
def get_metadata(self, session: Session):
"""Request from TIDAL API /playlists endpoint"""
self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists(
session=session, identifier=self.playlist_id
)
if self.metadata is None:
return
self.name = (
self.metadata.title.replace("/", "_")
.replace("|", "_")
.replace(":", " -")
.replace('"', "")
.replace("..", "")
)
def set_items(self, session: Session):
"""Uses data from TIDAL API /playlists/items endpoint to
populate self.items"""
playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist(
session=session, playlist_id=self.playlist_id
)
if playlist_items is None:
self.items = tuple()
else:
self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items)
def set_dir(self, out_dir: Path):
"""Populates self.playlist_dir based on self.name, self.playlist_id"""
playlist_substring: str = f"{self.name} [{self.playlist_id}]"
self.playlist_dir: Path = out_dir / "Playlists" / playlist_substring
self.playlist_dir.mkdir(parents=True, exist_ok=True)
def save_cover_image(self, session: Session, out_dir: Path):
"""Requests self.metadata.image and attempts to write it to disk"""
if self.playlist_dir is None:
self.set_dir(out_dir=out_dir)
self.cover_path: Path = self.playlist_dir / "cover.jpg"
if not self.cover_path.exists():
download_cover_image(
session=session,
cover_uuid=self.metadata.square_image,
output_dir=self.playlist_dir,
dimension=1080,
)
else:
self.playlist_cover_saved = True
def save_description(self):
"""Requests self.metadata.description and attempts to write it to disk"""
description_path: Path = self.playlist_dir / "PlaylistDescription.txt"
if self.metadata.description is not None and len(self.metadata.description) > 0:
if not description_path.exists():
description_path.write_text(f"{self.metadata.description}\n")
def get_items(self, session: Session, audio_format: AudioFormat):
"""Using either Track.get() or Video.get(), attempt to request
the data for each track or video in self.items"""
if len(self.items) == 0:
return
tracks_videos: list = [None] * len(self.items)
for i, item in enumerate(self.items):
if item is None:
tracks_videos[i] = None
continue
elif isinstance(item, TracksEndpointResponseJSON):
track: Track = Track(track_id=item.id)
track.get(
session=session,
audio_format=audio_format,
out_dir=self.playlist_dir,
metadata=item,
)
tracks_videos[i] = track | elif isinstance(item, VideosEndpointResponseJSON): | 3 | 2023-12-12 21:50:25+00:00 | 12k |
lbcb-sci/GNNome | train.py | [
{
"identifier": "AssemblyGraphDataset",
"path": "graph_dataset.py",
"snippet": "class AssemblyGraphDataset(DGLDataset):\n def __init__(self, root, assembler, threads=32, generate=False):\n self.root = os.path.abspath(root)\n self.assembler = assembler\n self.threads = threads\n self.assembly_dir = os.path.join(self.root, self.assembler)\n # print(self.assembly_dir)\n\n if 'raw' not in os.listdir(self.root):\n subprocess.run(f\"mkdir 'raw'\", shell=True, cwd=self.root)\n if 'output' not in os.listdir(self.assembly_dir):\n subprocess.run(f\"mkdir 'output'\", shell=True, cwd=self.assembly_dir)\n if f'processed' not in os.listdir(self.assembly_dir):\n subprocess.run(f\"mkdir 'processed'\", shell=True, cwd=self.assembly_dir)\n if f'info' not in os.listdir(self.assembly_dir):\n subprocess.run(f\"mkdir 'info'\", shell=True, cwd=self.assembly_dir)\n\n raw_dir = os.path.join(self.root, 'raw')\n save_dir = os.path.join(self.assembly_dir, f'processed')\n self.output_dir = os.path.join(self.assembly_dir, f'output')\n self.info_dir = os.path.join(self.assembly_dir, f'info')\n \n config = get_config()\n raven_dir = config['raven_dir']\n self.raven_path = os.path.join(raven_dir, f'build/bin/raven')\n self.raven_path = os.path.abspath(self.raven_path)\n hifiasm_dir = config['hifiasm_dir']\n self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')\n self.hifiasm_path = os.path.abspath(self.hifiasm_path)\n \n super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)\n\n self.graph_list = []\n if not generate:\n for file in os.listdir(self.save_dir):\n idx = int(file[:-4])\n graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]\n graph = preprocess_graph(graph, self.root, idx)\n graph = add_positional_encoding(graph)\n print(f'DGL graph idx={idx} info:\\n',graph)\n self.graph_list.append((idx, graph))\n self.graph_list.sort(key=lambda x: x[0])\n\n def has_cache(self):\n \"\"\"Check if the raw data is already processed and stored.\"\"\"\n raw_files = {int(re.findall(r'(\\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}\n prc_files = {int(re.findall(r'(\\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}\n return len(raw_files - prc_files) == 0 # set difference\n\n def __len__(self):\n return len(os.listdir(self.save_dir))\n\n def __getitem__(self, idx):\n i, graph = self.graph_list[idx]\n return i, graph\n\n def process(self):\n pass"
},
{
"identifier": "get_hyperparameters",
"path": "hyperparameters.py",
"snippet": "def get_hyperparameters():\n return {\n\n # Setup\n 'data_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/hifi/train',\n 'temp_path': '/home/vrcekl/scratch/gnnome_assembly/train',\n 'eval_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/hifi/evaluate',\n 'asms_path': '/home/vrcekl/scratch/gnnome_assembly/evaluate',\n 'refs_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/references',\n 'checkpoints_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/checkpoints',\n 'models_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/models',\n \n 'data_path_ont': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/ont/train',\n 'eval_path_ont': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/ont/evaluate',\n 'asms_path_ont': '/home/vrcekl/scratch/gnnome_assembly/evaluate_ont',\n \n 'raven_path': '',\n 'hifiasm_path': '',\n 'pbsim3_dir': '',\n \n 'sample_profile_id': '',\n 'sample_file': '',\n \n 'assembler': 'hifiasm',\n 'dataset': 'chm13', # Not used at the moment\n 'initials': 'LV',\n\n 'device': 'cuda:0' if torch.cuda.is_available() else 'cpu',\n 'seed': 1,\n 'wandb_mode': 'disabled', # switch between 'online' and 'disabled'\n # 'wandb_project': 'GeNNome-hifiasm',\n 'wandb_project': 'hifiasm-runs',\n # 'wandb_project': 'Sep-23_ablations',\n\n 'chr_overfit': 0,\n 'plot_nga50_during_training': False,\n 'eval_frequency': 20, \n\n # Data\n 'use_similarities': True,\n # 'pos_to_neg_ratio': 16.5, # Not used, but could be a hyperparam for loss weight\n\n # Model\n 'dim_latent': 64,\n 'num_gnn_layers': 8,\n 'node_features': 2,\n 'edge_features': 2, # Put 2 if you use similarities, 1 otherwise\n 'hidden_edge_features': 16,\n 'hidden_edge_scores': 64,\n 'nb_pos_enc': 0,\n 'type_pos_enc': 'PR',\n 'batch_norm': True,\n # 'dropout': 0.08,\n\n # Training\n 'num_epochs': 200,\n 'lr': 1e-4,\n 'use_symmetry_loss': True,\n 'alpha': 0.1,\n 'num_parts_metis_train': 200,\n 'num_parts_metis_eval': 200,\n 'num_nodes_per_cluster': 10000, # 2000 = max 10GB GPU memory for d=128, L=8\n 'npc_lower_bound': 1, # 0.8\n 'npc_upper_bound': 1, # 1.2\n 'k_extra_hops': 1,\n 'patience': 2,\n 'decay': 0.95,\n 'masking': True,\n 'mask_frac_low': 80, # ~ 25x\n 'mask_frac_high': 100, # ~ 60x\n\n # Decoding\n 'strategy': 'greedy',\n 'num_decoding_paths': 100,\n 'decode_with_labels': False,\n 'load_checkpoint': True,\n 'num_threads': 32,\n 'B': 1,\n 'len_threshold': 10,\n }"
},
{
"identifier": "get_config",
"path": "config.py",
"snippet": "def get_config():\n return {\n 'checkpoints_path': 'checkpoints',\n 'models_path': 'models',\n \n 'tool_dir': 'vendor',\n 'raven_dir': 'vendor/raven-1.8.1',\n 'hifiasm_dir': 'vendor/hifiasm-0.18.8',\n 'pbsim3_dir': 'vendor/pbsim3',\n \n 'sample_profile_id': '',\n 'sample_file': '',\n 'sequencing_depth': 60,\n }"
},
{
"identifier": "inference",
"path": "inference.py",
"snippet": "def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None):\n \"\"\"Using a pretrained model, get walks and contigs on new data.\"\"\"\n hyperparameters = get_hyperparameters()\n seed = hyperparameters['seed']\n num_gnn_layers = hyperparameters['num_gnn_layers']\n hidden_features = hyperparameters['dim_latent']\n nb_pos_enc = hyperparameters['nb_pos_enc']\n\n batch_norm = hyperparameters['batch_norm']\n node_features = hyperparameters['node_features']\n edge_features = hyperparameters['edge_features']\n hidden_edge_features = hyperparameters['hidden_edge_features']\n hidden_edge_scores = hyperparameters['hidden_edge_scores']\n\n strategy = hyperparameters['strategy']\n B = hyperparameters['B']\n nb_paths = hyperparameters['num_decoding_paths']\n len_threshold = hyperparameters['len_threshold']\n use_labels = hyperparameters['decode_with_labels']\n load_checkpoint = hyperparameters['load_checkpoint']\n threads = hyperparameters['num_threads']\n\n # assembly_path = hyperparameters['asms_path']\n\n device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph\n utils.set_seed(seed)\n time_start = datetime.now()\n\n ds = AssemblyGraphDataset(data_path, assembler)\n\n inference_dir = os.path.join(savedir, 'decode')\n if not os.path.isdir(inference_dir):\n os.makedirs(inference_dir)\n\n checkpoint_dir = os.path.join(savedir, 'checkpoint')\n if not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n walks_per_graph = []\n contigs_per_graph = []\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'\\nelapsed time (loading network and data): {elapsed}\\n')\n\n for idx, g in ds:\n # Get scores\n print(f'==== Processing graph {idx} ====')\n with torch.no_grad():\n time_start_get_scores = datetime.now()\n g = g.to(device)\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1) # No PageRank\n \n if use_labels: # Debugging\n print('Decoding with labels...')\n g.edata['score'] = g.edata['y'].clone()\n else:\n print('Decoding with model scores...')\n predicts_path = os.path.join(inference_dir, f'{idx}_predicts.pt')\n if os.path.isfile(predicts_path):\n print(f'Loading the scores from:\\n{predicts_path}\\n')\n g.edata['score'] = torch.load(predicts_path)\n else:\n print(f'Loading model parameters from: {model_path}')\n model = models.SymGatedGCNModel(node_features, edge_features, hidden_features, hidden_edge_features, num_gnn_layers, hidden_edge_scores, batch_norm, nb_pos_enc, dropout=dropout)\n model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))\n model.eval()\n model.to(device)\n print(f'Computing the scores with the model...\\n')\n edge_predictions = model(g, x, e, pe)\n g.edata['score'] = edge_predictions.squeeze()\n torch.save(g.edata['score'], os.path.join(inference_dir, f'{idx}_predicts.pt'))\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_scores)\n print(f'elapsed time (get_scores): {elapsed}')\n\n # Load info data\n print(f'Loading successors...')\n with open(f'{data_path}/{assembler}/info/{idx}_succ.pkl', 'rb') as f_succs:\n succs = pickle.load(f_succs)\n print(f'Loading predecessors...')\n with open(f'{data_path}/{assembler}/info/{idx}_pred.pkl', 'rb') as f_preds:\n preds 
= pickle.load(f_preds)\n print(f'Loading edges...')\n with open(f'{data_path}/{assembler}/info/{idx}_edges.pkl', 'rb') as f_edges:\n edges = pickle.load(f_edges)\n print(f'Done loading the auxiliary graph data!')\n\n # Get walks\n time_start_get_walks = datetime.now()\n \n # Some prefixes can be <0 and that messes up the assemblies\n g.edata['prefix_length'] = g.edata['prefix_length'].masked_fill(g.edata['prefix_length']<0, 0)\n \n if strategy == 'greedy':\n walks = get_contigs_greedy(g, succs, preds, edges, nb_paths, len_threshold, use_labels, checkpoint_dir, load_checkpoint, device='cpu', threads=threads)\n else:\n print('Invalid decoding strategy')\n raise Exception\n \n elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_walks)\n print(f'elapsed time (get_walks): {elapsed}')\n inference_path = os.path.join(inference_dir, f'{idx}_walks.pkl')\n pickle.dump(walks, open(f'{inference_path}', 'wb'))\n \n print(f'Loading reads...')\n with open(f'{data_path}/{assembler}/info/{idx}_reads.pkl', 'rb') as f_reads:\n reads = pickle.load(f_reads)\n print(f'Done!')\n \n time_start_get_contigs = datetime.now()\n contigs = evaluate.walk_to_sequence(walks, g, reads, edges)\n elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_contigs)\n print(f'elapsed time (get_contigs): {elapsed}')\n\n assembly_dir = os.path.join(savedir, f'assembly')\n if not os.path.isdir(assembly_dir):\n os.makedirs(assembly_dir)\n evaluate.save_assembly(contigs, assembly_dir, idx)\n walks_per_graph.append(walks)\n contigs_per_graph.append(contigs)\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'elapsed time (total): {elapsed}')\n \n if DEBUG:\n exit(0)\n\n print(f'Found contigs for {data_path}!')\n print(f'Model used: {model_path}')\n print(f'Assembly saved in: {savedir}')"
}
] | import argparse
import copy
import os
import pickle
import random
import re
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import dgl
import wandb
import evaluate
import models
import utils
from datetime import datetime
from tqdm import tqdm
from torch.nn.functional import kl_div
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.profiler import profile, record_function, ProfilerActivity
from dgl.dataloading import GraphDataLoader
from graph_dataset import AssemblyGraphDataset
from hyperparameters import get_hyperparameters
from config import get_config
from inference import inference | 7,355 | edge_labels = g.edata['y'][sub_g.edata['_ID']].to(device)
loss = criterion(edge_predictions, edge_labels)
TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)
acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)
acc_inv, precision_inv, recall_inv, f1_inv = utils.calculate_metrics_inverse(TP, TN, FP, FN)
try:
fp_rate = FP / (FP + TN)
except ZeroDivisionError:
fp_rate = 0.0
try:
fn_rate = FN / (FN + TP)
except ZeroDivisionError:
fn_rate = 0.0
# Append results of a single mini-batch / METIS partition
# These are used for epoch mean = mean over partitions over graphs - mostly DEPRECATED
running_loss.append(loss.item())
running_fp_rate.append(fp_rate)
running_fn_rate.append(fn_rate)
running_acc.append(acc)
running_precision.append(precision)
running_recall.append(recall)
running_f1.append(f1)
# These are used for epoch mean = mean over all the partitions in all the graphs
valid_loss_epoch.append(loss.item())
valid_fp_rate_epoch.append(fp_rate)
valid_fn_rate_epoch.append(fn_rate)
valid_acc_epoch.append(acc)
valid_precision_epoch.append(precision)
valid_recall_epoch.append(recall)
valid_f1_epoch.append(f1)
# Inverse metrics because F1 and them are not good for dataset with mostly positive labels
valid_acc_inv_epoch.append(acc_inv)
valid_precision_inv_epoch.append(precision_inv)
valid_recall_inv_epoch.append(recall_inv)
valid_f1_inv_epoch.append(f1_inv)
# Average over all mini-batches (partitions) in a single graph - mostly DEPRECATED
val_loss = np.mean(running_loss)
val_fp_rate = np.mean(running_fp_rate)
val_fn_rate = np.mean(running_fn_rate)
val_acc = np.mean(running_acc)
val_precision = np.mean(running_precision)
val_recall = np.mean(running_recall)
val_f1 = np.mean(running_f1)
# elapsed = utils.timedelta_to_str(datetime.now() - time_start_eval)
# print(f'\nVALIDATION (one validation graph): Epoch = {epoch}, Graph = {idx}')
# print(f'Loss: {val_loss:.4f}, fp_rate(GT=0): {val_fp_rate:.4f}, fn_rate(GT=1): {val_fn_rate:.4f}')
# print(f'elapsed time: {elapsed}\n\n')
# Record after each graph in the dataset - mostly DEPRECATED
val_loss_all_graphs.append(val_loss)
val_fp_rate_all_graphs.append(val_fp_rate)
val_fn_rate_all_graphs.append(val_fn_rate)
val_acc_all_graphs.append(val_acc)
val_precision_all_graphs.append(val_precision)
val_recall_all_graphs.append(val_recall)
val_f1_all_graphs.append(val_f1)
# Average over all the training graphs in one epoch - mostly DEPRECATED
val_loss_all_graphs = np.mean(val_loss_all_graphs)
val_fp_rate_all_graphs = np.mean(val_fp_rate_all_graphs)
val_fn_rate_all_graphs = np.mean(val_fn_rate_all_graphs)
val_acc_all_graphs = np.mean(val_acc_all_graphs)
val_precision_all_graphs = np.mean(val_precision_all_graphs)
val_recall_all_graphs = np.mean(val_recall_all_graphs)
val_f1_all_graphs = np.mean(val_f1_all_graphs)
# Average over all the partitions in one epoch
valid_loss_epoch = np.mean(valid_loss_epoch)
valid_fp_rate_epoch = np.mean(valid_fp_rate_epoch)
valid_fn_rate_epoch = np.mean(valid_fn_rate_epoch)
valid_acc_epoch = np.mean(valid_acc_epoch)
valid_precision_epoch = np.mean(valid_precision_epoch)
valid_recall_epoch = np.mean(valid_recall_epoch)
valid_f1_epoch = np.mean(valid_f1_epoch)
valid_acc_inv_epoch = np.mean(valid_acc_inv_epoch)
valid_precision_inv_epoch = np.mean(valid_precision_inv_epoch)
valid_recall_inv_epoch = np.mean(valid_recall_inv_epoch)
valid_f1_inv_epoch = np.mean(valid_f1_inv_epoch)
loss_per_epoch_valid.append(valid_loss_epoch)
f1_inv_per_epoch_valid.append(valid_f1_inv_epoch)
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'\n==> VALIDATION (all validation graphs): Epoch = {epoch}')
print(f'Loss: {valid_loss_epoch:.4f}, fp_rate(GT=0): {valid_fp_rate_epoch:.4f}, fn_rate(GT=1): {valid_fn_rate_epoch:.4f}')
print(f'Elapsed time total: {elapsed}\n\n')
if not overfit:
# Choose the model with minimal loss on validation set
if len(loss_per_epoch_valid) == 1 or len(loss_per_epoch_valid) > 1 and loss_per_epoch_valid[-1] < min(loss_per_epoch_valid[:-1]):
torch.save(model.state_dict(), model_min_loss_path)
print(f'Epoch {epoch:3}: Model MIN-LOSS saved! -> Val Loss = {valid_loss_epoch:.6f}\tVal F1 = {valid_f1_epoch:.4f}\tVal inv-F1 = {valid_f1_inv_epoch:.4f}' \
f'\tVal FPR = {valid_fp_rate_epoch:.4f}\tVal FNR = {valid_fn_rate_epoch:.4f}\t')
save_checkpoint(epoch, model, optimizer, min(loss_per_epoch_train), min(loss_per_epoch_valid), out, ckpt_path) # Save the checkpoint every epoch
scheduler.step(valid_loss_epoch)
            # Code that evaluates NGA50 during training -- only for overfitting
plot_nga50_during_training = hyperparameters['plot_nga50_during_training']
i = hyperparameters['chr_overfit']
eval_frequency = hyperparameters['eval_frequency']
if overfit and plot_nga50_during_training and (epoch+1) % eval_frequency == 0:
# call inference
refs_path = hyperparameters['refs_path']
save_dir = os.path.join(train_path, assembler)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
if not os.path.isdir(os.path.join(save_dir, f'assembly')):
os.mkdir(os.path.join(save_dir, f'assembly'))
if not os.path.isdir(os.path.join(save_dir, f'inference')):
os.mkdir(os.path.join(save_dir, f'inference'))
if not os.path.isdir(os.path.join(save_dir, f'reports')):
os.mkdir(os.path.join(save_dir, f'reports'))
|
def save_checkpoint(epoch, model, optimizer, loss_train, loss_valid, out, ckpt_path):
checkpoint = {
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optim_state_dict': optimizer.state_dict(),
'loss_train': loss_train,
'loss_valid': loss_valid,
}
torch.save(checkpoint, ckpt_path)
def load_checkpoint(out, model, optimizer):
ckpt_path = f'checkpoints/{out}.pt'
checkpoint = torch.load(ckpt_path)
epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optim_state_dict'])
loss_train = checkpoint['loss_train']
loss_valid = checkpoint['loss_valid']
return epoch, model, optimizer, loss_train, loss_valid
def view_model_param(model):
total_param = 0
for param in model.parameters():
total_param += np.prod(list(param.data.size()))
return total_param
def mask_graph(g, fraction, device):
keep_node_idx = torch.rand(g.num_nodes(), device=device) < fraction
sub_g = dgl.node_subgraph(g, keep_node_idx, store_ids=True)
return sub_g
def mask_graph_strandwise(g, fraction, device):
keep_node_idx_half = torch.rand(g.num_nodes() // 2, device=device) < fraction
keep_node_idx = torch.empty(keep_node_idx_half.size(0) * 2, dtype=keep_node_idx_half.dtype)
keep_node_idx[0::2] = keep_node_idx_half
keep_node_idx[1::2] = keep_node_idx_half
sub_g = dgl.node_subgraph(g, keep_node_idx, store_ids=True)
print(f'Masking fraction: {fraction}')
print(f'Original graph: N={g.num_nodes()}, E={g.num_edges()}')
print(f'Subsampled graph: N={sub_g.num_nodes()}, E={sub_g.num_edges()}')
return sub_g
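# Editor note (illustration, not part of the original script): the two strands of a
# read appear to be stored as consecutive node ids (2i, 2i+1), so mask_graph_strandwise
# draws one keep/drop decision per read pair and repeats it for both strands, e.g.
#   keep_node_idx_half = [True, False, True]             -> one decision per pair
#   keep_node_idx      = [True, True, False, False, True, True]
# This keeps complementary nodes either both inside or both outside the subgraph,
# unlike mask_graph above, which samples every node independently.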
def symmetry_loss(org_scores, rev_scores, labels, pos_weight=1.0, alpha=1.0):
BCE = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='none')
BCE_org = BCE(org_scores, labels)
BCE_rev = BCE(rev_scores, labels)
abs_diff = torch.abs(org_scores - rev_scores)
loss = (BCE_org + BCE_rev + alpha * abs_diff)
loss = loss.mean()
return loss
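# Editor note: a minimal, hedged sketch of how symmetry_loss behaves (values made up).
# Per edge it computes BCE(org, y) + BCE(rev, y) + alpha * |org - rev| and averages,
# so an edge is also penalised for being scored differently on the original graph and
# on the edge-reversed graph:
#   org = torch.tensor([2.0, -1.0])   # logits from the original orientation
#   rev = torch.tensor([1.5, -0.5])   # logits from the reversed orientation
#   y   = torch.tensor([1.0,  0.0])   # edge labels
#   loss = symmetry_loss(org, rev, y, pos_weight=torch.tensor([1.0]), alpha=0.1)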
def train(train_path, valid_path, out, assembler, overfit=False, dropout=None, seed=None, resume=False):
hyperparameters = get_hyperparameters()
if seed is None:
seed = hyperparameters['seed']
num_epochs = hyperparameters['num_epochs']
num_gnn_layers = hyperparameters['num_gnn_layers']
hidden_features = hyperparameters['dim_latent']
nb_pos_enc = hyperparameters['nb_pos_enc']
patience = hyperparameters['patience']
lr = hyperparameters['lr']
device = hyperparameters['device']
batch_norm = hyperparameters['batch_norm']
node_features = hyperparameters['node_features']
edge_features = hyperparameters['edge_features']
hidden_edge_features = hyperparameters['hidden_edge_features']
hidden_edge_scores = hyperparameters['hidden_edge_scores']
decay = hyperparameters['decay']
wandb_mode = hyperparameters['wandb_mode']
wandb_project = hyperparameters['wandb_project']
num_nodes_per_cluster = hyperparameters['num_nodes_per_cluster']
npc_lower_bound = hyperparameters['npc_lower_bound']
npc_upper_bound = hyperparameters['npc_upper_bound']
k_extra_hops = hyperparameters['k_extra_hops']
masking = hyperparameters['masking']
mask_frac_low = hyperparameters['mask_frac_low']
mask_frac_high = hyperparameters['mask_frac_high']
use_symmetry_loss = hyperparameters['use_symmetry_loss']
alpha = hyperparameters['alpha']
config = get_config()
checkpoints_path = os.path.abspath(config['checkpoints_path'])
models_path = os.path.abspath(config['models_path'])
print(f'----- TRAIN -----')
print(f'\nSaving checkpoints: {checkpoints_path}')
print(f'Saving models: {models_path}\n')
print(f'USING SEED: {seed}')
if torch.cuda.is_available():
torch.cuda.set_device(device)
utils.set_seed(seed)
time_start = datetime.now()
timestamp = time_start.strftime('%Y-%b-%d-%H-%M-%S')
if out is None:
out = timestamp
assert train_path is not None, "train_path not specified!"
assert valid_path is not None, "valid_path not specified!"
if not overfit:
ds_train = AssemblyGraphDataset(train_path, assembler=assembler)
ds_valid = AssemblyGraphDataset(valid_path, assembler=assembler)
else:
ds_train = ds_valid = AssemblyGraphDataset(train_path, assembler=assembler)
pos_to_neg_ratio = sum([((g.edata['y']==1).sum() / (g.edata['y']==0).sum()).item() for idx, g in ds_train]) / len(ds_train)
model = models.SymGatedGCNModel(node_features, edge_features, hidden_features, hidden_edge_features, num_gnn_layers, hidden_edge_scores, batch_norm, nb_pos_enc, dropout=dropout)
model.to(device)
if not os.path.exists(models_path):
print(models_path)
os.makedirs(models_path)
out = out + f'_seed{seed}'
model_path = os.path.join(models_path, f'model_{out}.pt') # TODO: Delete this?
model_min_loss_path = os.path.join(models_path, f'model_min-loss_{out}.pt')
print(f'MODEL PATH: {model_path}')
ckpt_path = f'{checkpoints_path}/ckpt_{out}.pt'
print(f'CHECKPOINT PATH: {ckpt_path}')
print(f'\nNumber of network parameters: {view_model_param(model)}\n')
print(f'Normalization type : Batch Normalization\n') if batch_norm else print(f'Normalization type : Layer Normalization\n')
pos_weight = torch.tensor([1 / pos_to_neg_ratio], device=device)
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
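    # Editor note: pos_to_neg_ratio above is the mean ratio of positive to negative edge
    # labels over the training graphs; because positive labels dominate these graphs,
    # pos_weight = 1 / pos_to_neg_ratio is < 1 and down-weights the positive term of
    # BCEWithLogitsLoss, so the rarer negative edges still influence the loss.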
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=decay, patience=patience, verbose=True)
start_epoch = 0
loss_per_epoch_train, loss_per_epoch_valid = [], []
f1_inv_per_epoch_valid = []
if not os.path.exists(checkpoints_path):
os.makedirs(checkpoints_path)
if resume:
# ckpt_path = f'{checkpoints_path}/ckpt_{out}.pt' # This should be the checkpoint of the old run
checkpoint = torch.load(ckpt_path)
        print('Loading the checkpoint from:', ckpt_path, sep='\t')
model_path = os.path.join(models_path, f'model_{out}_resumed-{num_epochs}.pt')
ckpt_path = os.path.join(checkpoints_path, f'ckpt_{out}_resumed-{num_epochs}.pt')
print('Saving the resumed model to:', model_path, sep='\t')
print('Saving the new checkpoint to:', ckpt_path, sep='\t')
start_epoch = checkpoint['epoch'] + 1
print(f'Resuming from epoch: {start_epoch}')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optim_state_dict'])
min_loss_train = checkpoint['loss_train']
min_loss_valid = checkpoint['loss_valid']
loss_per_epoch_train.append(min_loss_train)
loss_per_epoch_valid.append(min_loss_valid)
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'Loading data done. Elapsed time: {elapsed}')
try:
with wandb.init(project=wandb_project, config=hyperparameters, mode=wandb_mode, name=out):
wandb.watch(model, criterion, log='all', log_freq=1000)
for epoch in range(start_epoch, num_epochs):
train_loss_all_graphs, train_fp_rate_all_graphs, train_fn_rate_all_graphs = [], [], []
train_acc_all_graphs, train_precision_all_graphs, train_recall_all_graphs, train_f1_all_graphs = [], [], [], []
train_loss_epoch, train_fp_rate_epoch, train_fn_rate_epoch = [], [], []
train_acc_epoch, train_precision_epoch, train_recall_epoch, train_f1_epoch = [], [], [], []
train_acc_inv_epoch, train_precision_inv_epoch, train_recall_inv_epoch, train_f1_inv_epoch = [], [], [], []
train_aps_epoch, train_aps_inv_epoch = [], []
print('\n===> TRAINING\n')
random.shuffle(ds_train.graph_list)
for data in ds_train:
model.train()
idx, g = data
print(f'\n(TRAIN: Epoch = {epoch:3}) NEW GRAPH: index = {idx}')
if masking:
fraction = random.randint(mask_frac_low, mask_frac_high) / 100 # Fraction of nodes to be left in the graph (.85 -> ~30x, 1.0 -> 60x)
g = mask_graph_strandwise(g, fraction, device)
                # Number of clusters dependent on graph size!
num_nodes_per_cluster_min = int(num_nodes_per_cluster * npc_lower_bound)
num_nodes_per_cluster_max = int(num_nodes_per_cluster * npc_upper_bound) + 1
num_nodes_for_g = torch.LongTensor(1).random_(num_nodes_per_cluster_min, num_nodes_per_cluster_max).item()
num_clusters = g.num_nodes() // num_nodes_for_g + 1
if num_nodes_for_g >= g.num_nodes(): # train with full graph
print(f'\nUse METIS: False')
print(f'Use full graph')
g = g.to(device)
if use_symmetry_loss:
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
# pe = g.ndata['pe'].to(device)
# pe = (pe - pe.mean()) / pe.std()
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
# pe = torch.cat((pe_in, pe_out, pe), dim=1)
pe = torch.cat((pe_in, pe_out), dim=1)
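                        # Editor note: `pe` is a simple 2-feature positional encoding per
                        # node, the z-score normalised in- and out-degree; the commented-out
                        # lines would also append the precomputed `g.ndata['pe']` feature
                        # (PageRank-style, per the comment in the inference code).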
org_scores = model(g, x, e, pe).squeeze(-1)
edge_predictions = org_scores
edge_labels = g.edata['y'].to(device)
g = dgl.reverse(g, True, True)
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
# pe = g.ndata['pe'].to(device)
# pe = (pe - pe.mean()) / pe.std()
pe_out = g.ndata['in_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe_in = g.ndata['out_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
# pe = torch.cat((pe_in, pe_out, pe), dim=1)
pe = torch.cat((pe_in, pe_out), dim=1)
rev_scores = model(g, x, e, pe).squeeze(-1)
loss = symmetry_loss(org_scores, rev_scores, edge_labels, pos_weight, alpha=alpha)
else:
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
# pe = g.ndata['pe'].to(device)
# pe = (pe - pe.mean()) / pe.std()
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
# pe = torch.cat((pe_in, pe_out, pe), dim=1)
pe = torch.cat((pe_in, pe_out), dim=1)
edge_predictions = model(g, x, e, pe)
edge_predictions = edge_predictions.squeeze(-1)
edge_labels = g.edata['y'].to(device)
loss = criterion(edge_predictions, edge_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss = loss.item()
TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)
acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)
try:
fp_rate = FP / (FP + TN)
except ZeroDivisionError:
fp_rate = 0.0
try:
fn_rate = FN / (FN + TP)
except ZeroDivisionError:
fn_rate = 0.0
train_fp_rate = fp_rate
train_fn_rate = fn_rate
train_acc = acc
train_precision = precision
train_recall = recall
train_f1 = f1
train_loss_epoch.append(loss.item())
train_fp_rate_epoch.append(fp_rate)
train_fn_rate_epoch.append(fn_rate)
# elapsed = utils.timedelta_to_str(datetime.now() - time_start)
# print(f'\nTRAINING (one training graph): Epoch = {epoch}, Graph = {idx}')
# print(f'Loss: {train_loss:.4f}, fp_rate(GT=0): {train_fp_rate:.4f}, fn_rate(GT=1): {train_fn_rate:.4f}')
# print(f'elapsed time: {elapsed}\n\n')
else: # train with mini-batch
print(f'\nUse METIS: True')
print(f'Number of clusters:', num_clusters)
g = g.long()
d = dgl.metis_partition(g, num_clusters, extra_cached_hops=k_extra_hops)
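                    # Editor note: dgl.metis_partition returns a dict of node-induced
                    # subgraphs, one per METIS cluster; extra_cached_hops adds a k-hop
                    # halo of neighbouring nodes around each cluster so that message
                    # passing near partition boundaries still sees its local context.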
sub_gs = list(d.values())
random.shuffle(sub_gs)
# Loop over all mini-batch in the graph
running_loss, running_fp_rate, running_fn_rate = [], [], []
running_acc, running_precision, running_recall, running_f1 = [], [], [], []
for sub_g in sub_gs:
if use_symmetry_loss:
sub_g = sub_g.to(device)
x = g.ndata['x'][sub_g.ndata['_ID']].to(device)
e = g.edata['e'][sub_g.edata['_ID']].to(device)
pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)
org_scores = model(sub_g, x, e, pe).squeeze(-1)
labels = g.edata['y'][sub_g.edata['_ID']].to(device)
sub_g = dgl.reverse(sub_g, True, True)
x = g.ndata['x'][sub_g.ndata['_ID']].to(device)
e = g.edata['e'][sub_g.edata['_ID']].to(device)
pe_out = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe_in = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe = torch.cat((pe_in, pe_out), dim=1)
rev_scores = model(sub_g, x, e, pe).squeeze(-1)
loss = symmetry_loss(org_scores, rev_scores, labels, pos_weight, alpha=alpha)
edge_predictions = org_scores
edge_labels = labels
else:
sub_g = sub_g.to(device)
x = g.ndata['x'][sub_g.ndata['_ID']].to(device)
e = g.edata['e'][sub_g.edata['_ID']].to(device)
pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)
edge_predictions = model(sub_g, x, e, pe)
edge_predictions = edge_predictions.squeeze(-1)
edge_labels = g.edata['y'][sub_g.edata['_ID']].to(device)
loss = criterion(edge_predictions, edge_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)
acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)
acc_inv, precision_inv, recall_inv, f1_inv = utils.calculate_metrics_inverse(TP, TN, FP, FN)
try:
fp_rate = FP / (FP + TN)
except ZeroDivisionError:
fp_rate = 0.0
try:
fn_rate = FN / (FN + TP)
except ZeroDivisionError:
fn_rate = 0.0
# Append results of a single mini-batch / METIS partition
# These are used for epoch mean = mean over partitions over graphs - mostly DEPRECATED
running_loss.append(loss.item())
running_fp_rate.append(fp_rate)
running_fn_rate.append(fn_rate)
running_acc.append(acc)
running_precision.append(precision)
running_recall.append(recall)
running_f1.append(f1)
# These are used for epoch mean = mean over all the partitions in all the graphs
train_loss_epoch.append(loss.item())
train_fp_rate_epoch.append(fp_rate)
train_fn_rate_epoch.append(fn_rate)
train_acc_epoch.append(acc)
train_precision_epoch.append(precision)
train_recall_epoch.append(recall)
train_f1_epoch.append(f1)
# Inverse metrics because F1 and them are not good for dataset with mostly positive labels
train_acc_inv_epoch.append(acc_inv)
train_precision_inv_epoch.append(precision_inv)
train_recall_inv_epoch.append(recall_inv)
train_f1_inv_epoch.append(f1_inv)
# Average over all mini-batches (partitions) in a single graph - mostly DEPRECATED
train_loss = np.mean(running_loss)
train_fp_rate = np.mean(running_fp_rate)
train_fn_rate = np.mean(running_fn_rate)
train_acc = np.mean(running_acc)
train_precision = np.mean(running_precision)
train_recall = np.mean(running_recall)
train_f1 = np.mean(running_f1)
# elapsed = utils.timedelta_to_str(datetime.now() - time_start)
# print(f'\nTRAINING (one training graph): Epoch = {epoch}, Graph = {idx}')
# print(f'Loss: {train_loss:.4f}, fp_rate(GT=0): {train_fp_rate:.4f}, fn_rate(GT=1): {train_fn_rate:.4f}')
# print(f'elapsed time: {elapsed}\n\n')
# Record after each graph in the dataset - mostly DEPRECATED
train_loss_all_graphs.append(train_loss)
train_fp_rate_all_graphs.append(train_fp_rate)
train_fn_rate_all_graphs.append(train_fn_rate)
train_acc_all_graphs.append(train_acc)
train_precision_all_graphs.append(train_precision)
train_recall_all_graphs.append(train_recall)
train_f1_all_graphs.append(train_f1)
# Average over all the training graphs in one epoch - mostly DEPRECATED
train_loss_all_graphs = np.mean(train_loss_all_graphs)
train_fp_rate_all_graphs = np.mean(train_fp_rate_all_graphs)
train_fn_rate_all_graphs = np.mean(train_fn_rate_all_graphs)
train_acc_all_graphs = np.mean(train_acc_all_graphs)
train_precision_all_graphs = np.mean(train_precision_all_graphs)
train_recall_all_graphs = np.mean(train_recall_all_graphs)
train_f1_all_graphs = np.mean(train_f1_all_graphs)
# Average over all the partitions in one epoch
train_loss_epoch = np.mean(train_loss_epoch)
train_fp_rate_epoch = np.mean(train_fp_rate_epoch)
train_fn_rate_epoch = np.mean(train_fn_rate_epoch)
train_acc_epoch = np.mean(train_acc_epoch)
train_precision_epoch = np.mean(train_precision_epoch)
train_recall_epoch = np.mean(train_recall_epoch)
train_f1_epoch = np.mean(train_f1_epoch)
train_acc_inv_epoch = np.mean(train_acc_inv_epoch)
train_precision_inv_epoch = np.mean(train_precision_inv_epoch)
train_recall_inv_epoch = np.mean(train_recall_inv_epoch)
train_f1_inv_epoch = np.mean(train_f1_inv_epoch)
loss_per_epoch_train.append(train_loss_epoch)
lr_value = optimizer.param_groups[0]['lr']
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'\n==> TRAINING (all training graphs): Epoch = {epoch}')
print(f'Loss: {train_loss_epoch:.4f}, fp_rate(GT=0): {train_fp_rate_epoch:.4f}, fn_rate(GT=1): {train_fn_rate_epoch:.4f}')
print(f'Elapsed time: {elapsed}\n\n')
if overfit:
                if len(loss_per_epoch_train) == 1 or len(loss_per_epoch_train) > 1 and loss_per_epoch_train[-1] < min(loss_per_epoch_train[:-1]):  # overfit mode skips validation, so track the best training loss
torch.save(model.state_dict(), model_path)
print(f'Epoch {epoch}: Model saved!')
save_checkpoint(epoch, model, optimizer, loss_per_epoch_train[-1], 0.0, out, ckpt_path)
scheduler.step(train_loss_all_graphs)
wandb.log({'train_loss': train_loss_all_graphs, 'train_accuracy': train_acc_all_graphs, \
'train_precision': train_precision_all_graphs, 'lr_value': lr_value, \
'train_recall': train_recall_all_graphs, 'train_f1': train_f1_all_graphs, \
'train_fp-rate': train_fp_rate_all_graphs, 'train_fn-rate': train_fn_rate_all_graphs})
continue # This will entirely skip the validation
val_loss_all_graphs, val_fp_rate_all_graphs, val_fn_rate_all_graphs = [], [], []
val_acc_all_graphs, val_precision_all_graphs, val_recall_all_graphs, val_f1_all_graphs = [], [], [], []
valid_loss_epoch, valid_fp_rate_epoch, valid_fn_rate_epoch = [], [], []
valid_acc_epoch, valid_precision_epoch, valid_recall_epoch, valid_f1_epoch = [], [], [], []
valid_acc_inv_epoch, valid_precision_inv_epoch, valid_recall_inv_epoch, valid_f1_inv_epoch = [], [], [], []
valid_aps_epoch, valid_aps_inv_epoch = [], []
with torch.no_grad():
print('\n===> VALIDATION\n')
time_start_eval = datetime.now()
model.eval()
for data in ds_valid:
idx, g = data
print(f'\n(VALID Epoch = {epoch:3}) NEW GRAPH: index = {idx}')
if masking:
fraction = random.randint(mask_frac_low, mask_frac_high) / 100 # Fraction of nodes to be left in the graph (.85 -> ~30x, 1.0 -> 60x)
g = mask_graph_strandwise(g, fraction, device)
                    # Number of clusters dependent on graph size!
num_nodes_per_cluster_min = int(num_nodes_per_cluster * npc_lower_bound)
num_nodes_per_cluster_max = int(num_nodes_per_cluster * npc_upper_bound) + 1
num_nodes_for_g = torch.LongTensor(1).random_(num_nodes_per_cluster_min, num_nodes_per_cluster_max).item() # DEBUG!!!
num_clusters = g.num_nodes() // num_nodes_for_g + 1
if num_nodes_for_g >= g.num_nodes(): # full graph
print(f'\nUse METIS: False')
print(f'Use full graph')
g = g.to(device)
if use_symmetry_loss:
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)
org_scores = model(g, x, e, pe).squeeze(-1)
edge_predictions = org_scores
edge_labels = g.edata['y'].to(device)
g = dgl.reverse(g, True, True)
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
pe_out = g.ndata['in_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe_in = g.ndata['out_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe = torch.cat((pe_in, pe_out), dim=1)
rev_scores = model(g, x, e, pe).squeeze(-1)
loss = symmetry_loss(org_scores, rev_scores, edge_labels, pos_weight, alpha=alpha)
else:
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)
edge_predictions = model(g, x, e, pe)
edge_predictions = edge_predictions.squeeze(-1)
edge_labels = g.edata['y'].to(device)
loss = criterion(edge_predictions, edge_labels)
val_loss = loss.item()
TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)
acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)
try:
fp_rate = FP / (FP + TN)
except ZeroDivisionError:
fp_rate = 0.0
try:
fn_rate = FN / (FN + TP)
except ZeroDivisionError:
fn_rate = 0.0
val_fp_rate = fp_rate
val_fn_rate = fn_rate
val_acc = acc
val_precision = precision
val_recall = recall
val_f1 = f1
valid_loss_epoch.append(loss.item())
valid_fp_rate_epoch.append(fp_rate)
valid_fn_rate_epoch.append(fn_rate)
# elapsed = utils.timedelta_to_str(datetime.now() - time_start_eval)
# print(f'\nVALIDATION (one validation graph): Epoch = {epoch}, Graph = {idx}')
# print(f'Loss: {val_loss:.4f}, fp_rate(GT=0): {val_fp_rate:.4f}, fn_rate(GT=1): {val_fn_rate:.4f}')
# print(f'elapsed time: {elapsed}\n\n')
else: # mini-batch
print(f'\nNum clusters:', num_clusters)
g = g.long()
d = dgl.metis_partition(g, num_clusters, extra_cached_hops=k_extra_hops)
sub_gs = list(d.values())
# g = g.to(device)
# For loop over all mini-batch in the graph
running_loss, running_fp_rate, running_fn_rate = [], [], []
running_acc, running_precision, running_recall, running_f1 = [], [], [], []
for sub_g in sub_gs:
if use_symmetry_loss:
sub_g = sub_g.to(device)
x = g.ndata['x'][sub_g.ndata['_ID']].to(device)
e = g.edata['e'][sub_g.edata['_ID']].to(device)
pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)
org_scores = model(sub_g, x, e, pe).squeeze(-1)
labels = g.edata['y'][sub_g.edata['_ID']].to(device)
sub_g = dgl.reverse(sub_g, True, True)
x = g.ndata['x'][sub_g.ndata['_ID']].to(device)
e = g.edata['e'][sub_g.edata['_ID']].to(device)
pe_out = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe_in = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe = torch.cat((pe_in, pe_out), dim=1)
rev_scores = model(sub_g, x, e, pe).squeeze(-1)
loss = symmetry_loss(org_scores, rev_scores, labels, pos_weight, alpha=alpha)
edge_predictions = org_scores
edge_labels = labels
else:
sub_g = sub_g.to(device)
x = g.ndata['x'][sub_g.ndata['_ID']].to(device)
e = g.edata['e'][sub_g.edata['_ID']].to(device)
pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)
edge_predictions = model(sub_g, x, e, pe)
edge_predictions = edge_predictions.squeeze(-1)
edge_labels = g.edata['y'][sub_g.edata['_ID']].to(device)
loss = criterion(edge_predictions, edge_labels)
TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)
acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)
acc_inv, precision_inv, recall_inv, f1_inv = utils.calculate_metrics_inverse(TP, TN, FP, FN)
try:
fp_rate = FP / (FP + TN)
except ZeroDivisionError:
fp_rate = 0.0
try:
fn_rate = FN / (FN + TP)
except ZeroDivisionError:
fn_rate = 0.0
# Append results of a single mini-batch / METIS partition
# These are used for epoch mean = mean over partitions over graphs - mostly DEPRECATED
running_loss.append(loss.item())
running_fp_rate.append(fp_rate)
running_fn_rate.append(fn_rate)
running_acc.append(acc)
running_precision.append(precision)
running_recall.append(recall)
running_f1.append(f1)
# These are used for epoch mean = mean over all the partitions in all the graphs
valid_loss_epoch.append(loss.item())
valid_fp_rate_epoch.append(fp_rate)
valid_fn_rate_epoch.append(fn_rate)
valid_acc_epoch.append(acc)
valid_precision_epoch.append(precision)
valid_recall_epoch.append(recall)
valid_f1_epoch.append(f1)
# Inverse metrics because F1 and them are not good for dataset with mostly positive labels
valid_acc_inv_epoch.append(acc_inv)
valid_precision_inv_epoch.append(precision_inv)
valid_recall_inv_epoch.append(recall_inv)
valid_f1_inv_epoch.append(f1_inv)
# Average over all mini-batches (partitions) in a single graph - mostly DEPRECATED
val_loss = np.mean(running_loss)
val_fp_rate = np.mean(running_fp_rate)
val_fn_rate = np.mean(running_fn_rate)
val_acc = np.mean(running_acc)
val_precision = np.mean(running_precision)
val_recall = np.mean(running_recall)
val_f1 = np.mean(running_f1)
# elapsed = utils.timedelta_to_str(datetime.now() - time_start_eval)
# print(f'\nVALIDATION (one validation graph): Epoch = {epoch}, Graph = {idx}')
# print(f'Loss: {val_loss:.4f}, fp_rate(GT=0): {val_fp_rate:.4f}, fn_rate(GT=1): {val_fn_rate:.4f}')
# print(f'elapsed time: {elapsed}\n\n')
# Record after each graph in the dataset - mostly DEPRECATED
val_loss_all_graphs.append(val_loss)
val_fp_rate_all_graphs.append(val_fp_rate)
val_fn_rate_all_graphs.append(val_fn_rate)
val_acc_all_graphs.append(val_acc)
val_precision_all_graphs.append(val_precision)
val_recall_all_graphs.append(val_recall)
val_f1_all_graphs.append(val_f1)
# Average over all the training graphs in one epoch - mostly DEPRECATED
val_loss_all_graphs = np.mean(val_loss_all_graphs)
val_fp_rate_all_graphs = np.mean(val_fp_rate_all_graphs)
val_fn_rate_all_graphs = np.mean(val_fn_rate_all_graphs)
val_acc_all_graphs = np.mean(val_acc_all_graphs)
val_precision_all_graphs = np.mean(val_precision_all_graphs)
val_recall_all_graphs = np.mean(val_recall_all_graphs)
val_f1_all_graphs = np.mean(val_f1_all_graphs)
# Average over all the partitions in one epoch
valid_loss_epoch = np.mean(valid_loss_epoch)
valid_fp_rate_epoch = np.mean(valid_fp_rate_epoch)
valid_fn_rate_epoch = np.mean(valid_fn_rate_epoch)
valid_acc_epoch = np.mean(valid_acc_epoch)
valid_precision_epoch = np.mean(valid_precision_epoch)
valid_recall_epoch = np.mean(valid_recall_epoch)
valid_f1_epoch = np.mean(valid_f1_epoch)
valid_acc_inv_epoch = np.mean(valid_acc_inv_epoch)
valid_precision_inv_epoch = np.mean(valid_precision_inv_epoch)
valid_recall_inv_epoch = np.mean(valid_recall_inv_epoch)
valid_f1_inv_epoch = np.mean(valid_f1_inv_epoch)
loss_per_epoch_valid.append(valid_loss_epoch)
f1_inv_per_epoch_valid.append(valid_f1_inv_epoch)
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'\n==> VALIDATION (all validation graphs): Epoch = {epoch}')
print(f'Loss: {valid_loss_epoch:.4f}, fp_rate(GT=0): {valid_fp_rate_epoch:.4f}, fn_rate(GT=1): {valid_fn_rate_epoch:.4f}')
print(f'Elapsed time total: {elapsed}\n\n')
if not overfit:
# Choose the model with minimal loss on validation set
if len(loss_per_epoch_valid) == 1 or len(loss_per_epoch_valid) > 1 and loss_per_epoch_valid[-1] < min(loss_per_epoch_valid[:-1]):
torch.save(model.state_dict(), model_min_loss_path)
print(f'Epoch {epoch:3}: Model MIN-LOSS saved! -> Val Loss = {valid_loss_epoch:.6f}\tVal F1 = {valid_f1_epoch:.4f}\tVal inv-F1 = {valid_f1_inv_epoch:.4f}' \
f'\tVal FPR = {valid_fp_rate_epoch:.4f}\tVal FNR = {valid_fn_rate_epoch:.4f}\t')
save_checkpoint(epoch, model, optimizer, min(loss_per_epoch_train), min(loss_per_epoch_valid), out, ckpt_path) # Save the checkpoint every epoch
scheduler.step(valid_loss_epoch)
                # Code that evaluates NGA50 during training -- only for overfitting
plot_nga50_during_training = hyperparameters['plot_nga50_during_training']
i = hyperparameters['chr_overfit']
eval_frequency = hyperparameters['eval_frequency']
if overfit and plot_nga50_during_training and (epoch+1) % eval_frequency == 0:
# call inference
refs_path = hyperparameters['refs_path']
save_dir = os.path.join(train_path, assembler)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
if not os.path.isdir(os.path.join(save_dir, f'assembly')):
os.mkdir(os.path.join(save_dir, f'assembly'))
if not os.path.isdir(os.path.join(save_dir, f'inference')):
os.mkdir(os.path.join(save_dir, f'inference'))
if not os.path.isdir(os.path.join(save_dir, f'reports')):
os.mkdir(os.path.join(save_dir, f'reports')) | inference(train_path, model_path, assembler, save_dir) | 3 | 2023-12-08 04:45:45+00:00 | 12k |
Deltares/imod-python | imod/mf6/disv.py | [
{
"identifier": "Package",
"path": "imod/mf6/package.py",
"snippet": "class Package(PackageBase, abc.ABC):\n \"\"\"\n Package is used to share methods for specific packages with no time\n component.\n\n It is not meant to be used directly, only to inherit from, to implement new\n packages.\n\n This class only supports `array input\n <https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=16>`_,\n not the list input which is used in :class:`BoundaryCondition`.\n \"\"\"\n\n _pkg_id = \"\"\n _init_schemata = {}\n _write_schemata = {}\n\n def __init__(self, allargs=None):\n super().__init__(allargs)\n\n def isel(self):\n raise NotImplementedError(\n \"Selection on packages not yet supported. To make a selection on \"\n f\"the xr.Dataset, call {self._pkg_id}.dataset.isel instead.\"\n \"You can create a new package with a selection by calling \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.isel(**selection))\"\n )\n\n def sel(self):\n raise NotImplementedError(\n \"Selection on packages not yet supported. To make a selection on \"\n f\"the xr.Dataset, call {self._pkg_id}.dataset.sel instead. \"\n \"You can create a new package with a selection by calling \"\n f\"{__class__.__name__}(**{self._pkg_id}.dataset.sel(**selection))\"\n )\n\n def _valid(self, value):\n \"\"\"\n Filters values that are None, False, or a numpy.bool_ False.\n Needs to be this specific, since 0.0 and 0 are valid values, but are\n equal to a boolean False.\n \"\"\"\n # Test singletons\n if value is False or value is None:\n return False\n # Test numpy bool (not singleton)\n elif isinstance(value, np.bool_) and not value:\n return False\n # When dumping to netCDF and reading back, None will have been\n # converted into a NaN. Only check NaN if it's a floating type to avoid\n # TypeErrors.\n elif np.issubdtype(type(value), np.floating) and np.isnan(value):\n return False\n else:\n return True\n\n @staticmethod\n def _number_format(dtype: type):\n if np.issubdtype(dtype, np.integer):\n return \"%i\"\n elif np.issubdtype(dtype, np.floating):\n return \"%.18G\"\n else:\n raise TypeError(\"dtype should be either integer or float\")\n\n @staticmethod\n def _initialize_template(pkg_id):\n loader = jinja2.PackageLoader(\"imod\", \"templates/mf6\")\n env = jinja2.Environment(loader=loader, keep_trailing_newline=True)\n if pkg_id == \"ims\":\n fname = \"sln-ims.j2\"\n elif pkg_id == \"tdis\":\n fname = \"sim-tdis.j2\"\n elif pkg_id in TRANSPORT_PACKAGES:\n fname = f\"gwt-{pkg_id}.j2\"\n elif pkg_id in EXCHANGE_PACKAGES:\n fname = f\"exg-{pkg_id}.j2\"\n else:\n fname = f\"gwf-{pkg_id}.j2\"\n return env.get_template(fname)\n\n def write_blockfile(self, pkgname, globaltimes, write_context: WriteContext):\n directory = write_context.get_formatted_write_directory()\n\n content = self.render(\n directory=directory,\n pkgname=pkgname,\n globaltimes=globaltimes,\n binary=write_context.use_binary,\n )\n filename = write_context.write_directory / f\"{pkgname}.{self._pkg_id}\"\n with open(filename, \"w\") as f:\n f.write(content)\n\n def write_binary_griddata(self, outpath, da, dtype):\n # From the modflow6 source, the header is defined as:\n # integer(I4B) :: kstp --> np.int32 : 1\n # integer(I4B) :: kper --> np.int32 : 2\n # real(DP) :: pertim --> 2 * np.int32 : 4\n # real(DP) :: totim --> 2 * np.int32 : 6\n # character(len=16) :: text --> 4 * np.int32 : 10\n # integer(I4B) :: m1, m2, m3 --> 3 * np.int32 : 13\n # so writing 13 bytes suffices to create a header.\n\n # The following code is commented out due to modflow issue 189\n # 
https://github.com/MODFLOW-USGS/modflow6/issues/189\n # We never write LAYERED data.\n # The (structured) dis array reader results in an error if you try to\n # read a 3D botm array. By storing nlayer * nrow * ncol in the first\n # header entry, the array is read properly.\n\n # haslayer = \"layer\" in da.dims\n # if haslayer:\n # nlayer, nrow, ncol = da.shape\n # else:\n # nrow, ncol = da.shape\n # nlayer = 1\n\n # This is a work around for the abovementioned issue.\n nval = np.product(da.shape)\n header = np.zeros(13, np.int32)\n header[-3] = np.int32(nval) # ncol\n header[-2] = np.int32(1) # nrow\n header[-1] = np.int32(1) # nlayer\n\n with open(outpath, \"w\") as f:\n header.tofile(f)\n da.values.flatten().astype(dtype).tofile(f)\n\n def write_text_griddata(self, outpath, da, dtype):\n with open(outpath, \"w\") as f:\n # Note: reshaping here avoids writing newlines after every number.\n # This dumps all the values in a single row rather than a single\n # column. This is to be preferred, since editors can easily\n # \"reshape\" a long row with \"word wrap\"; they cannot as easily\n # ignore newlines.\n fmt = self._number_format(dtype)\n data = da.values\n if data.ndim > 2:\n np.savetxt(fname=f, X=da.values.reshape((1, -1)), fmt=fmt)\n else:\n np.savetxt(fname=f, X=da.values, fmt=fmt)\n\n def render(self, directory, pkgname, globaltimes, binary):\n d = {}\n if directory is None:\n pkg_directory = pkgname\n else:\n pkg_directory = pathlib.Path(directory) / pkgname\n\n for varname in self.dataset.data_vars:\n key = self._keyword_map.get(varname, varname)\n\n if hasattr(self, \"_grid_data\") and varname in self._grid_data:\n layered, value = self._compose_values(\n self.dataset[varname], pkg_directory, key, binary=binary\n )\n if self._valid(value): # skip False or None\n d[f\"{key}_layered\"], d[key] = layered, value\n else:\n value = self[varname].values[()]\n if self._valid(value): # skip False or None\n d[key] = value\n\n if (hasattr(self, \"_auxiliary_data\")) and (names := get_variable_names(self)):\n d[\"auxiliary\"] = names\n\n return self._template.render(d)\n\n @staticmethod\n def _is_xy_data(obj):\n if isinstance(obj, (xr.DataArray, xr.Dataset)):\n xy = \"x\" in obj.dims and \"y\" in obj.dims\n elif isinstance(obj, (xu.UgridDataArray, xu.UgridDataset)):\n xy = obj.ugrid.grid.face_dimension in obj.dims\n else:\n raise TypeError(\n \"obj should be DataArray or UgridDataArray, \"\n f\"received {type(obj)} instead\"\n )\n return xy\n\n def _compose_values(self, da, directory, name, binary):\n \"\"\"\n Compose values of dictionary.\n\n Ignores times. 
Time dependent boundary conditions use the method from\n BoundaryCondition.\n\n See documentation of wq\n \"\"\"\n layered = False\n values = []\n if self._is_xy_data(da):\n if binary:\n path = (directory / f\"{name}.bin\").as_posix()\n values.append(f\"open/close {path} (binary)\")\n else:\n path = (directory / f\"{name}.dat\").as_posix()\n values.append(f\"open/close {path}\")\n else:\n if \"layer\" in da.dims:\n layered = True\n for layer in da.coords[\"layer\"]:\n values.append(f\"constant {da.sel(layer=layer).values[()]}\")\n else:\n value = da.values[()]\n if self._valid(value): # skip None or False\n values.append(f\"constant {value}\")\n else:\n values = None\n\n return layered, values\n\n def write(\n self,\n pkgname: str,\n globaltimes: Union[List, np.ndarray],\n write_context: WriteContext,\n ):\n directory = write_context.write_directory\n binary = write_context.use_binary\n self.write_blockfile(pkgname, globaltimes, write_context)\n\n if hasattr(self, \"_grid_data\"):\n if self._is_xy_data(self.dataset):\n pkgdirectory = directory / pkgname\n pkgdirectory.mkdir(exist_ok=True, parents=True)\n for varname, dtype in self._grid_data.items():\n key = self._keyword_map.get(varname, varname)\n da = self.dataset[varname]\n if self._is_xy_data(da):\n if binary:\n path = pkgdirectory / f\"{key}.bin\"\n self.write_binary_griddata(path, da, dtype)\n else:\n path = pkgdirectory / f\"{key}.dat\"\n self.write_text_griddata(path, da, dtype)\n\n def _validate(self, schemata: Dict, **kwargs) -> Dict[str, List[ValidationError]]:\n errors = defaultdict(list)\n for variable, var_schemata in schemata.items():\n for schema in var_schemata:\n if (\n variable in self.dataset.keys()\n ): # concentration only added to dataset if specified\n try:\n schema.validate(self.dataset[variable], **kwargs)\n except ValidationError as e:\n errors[variable].append(e)\n return errors\n\n def is_empty(self) -> bool:\n \"\"\"\n Returns True if the package is empty- for example if it contains only no-data values.\n \"\"\"\n\n # Create schemata dict only containing the\n # variables with a AllNoDataSchema and EmptyIndexesSchema (in case of\n # HFB) in the write schemata.\n allnodata_schemata = filter_schemata_dict(\n self._write_schemata, (AllNoDataSchema, EmptyIndexesSchema)\n )\n\n # Find if packages throws ValidationError for AllNoDataSchema or\n # EmptyIndexesSchema.\n allnodata_errors = self._validate(allnodata_schemata)\n return len(allnodata_errors) > 0\n\n def _validate_init_schemata(self, validate: bool):\n \"\"\"\n Run the \"cheap\" schema validations.\n\n The expensive validations are run during writing. Some are only\n available then: e.g. 
idomain to determine active part of domain.\n \"\"\"\n if not validate:\n return\n errors = self._validate(self._init_schemata)\n if len(errors) > 0:\n message = validation_pkg_error_message(errors)\n raise ValidationError(message)\n return\n\n def _get_vars_to_check(self):\n \"\"\"\n Helper function to get all variables which were not set to None\n \"\"\"\n variables = []\n for var in self._metadata_dict.keys():\n if ( # Filter optional variables not filled in\n self.dataset[var].size != 1\n ) or (\n self.dataset[var] != None # noqa: E711\n ):\n variables.append(var)\n\n return variables\n\n def copy(self) -> Any:\n # All state should be contained in the dataset.\n return type(self)(**self.dataset.copy())\n\n @staticmethod\n def _clip_repeat_stress(\n repeat_stress: xr.DataArray,\n time,\n time_start,\n time_end,\n ):\n \"\"\"\n Selection may remove the original data which are repeated.\n These should be re-inserted at the first occuring \"key\".\n Next, remove these keys as they've been \"promoted\" to regular\n timestamps with data.\n \"\"\"\n # First, \"pop\" and filter.\n keys, values = repeat_stress.values.T\n keep = (keys >= time_start) & (keys <= time_end)\n new_keys = keys[keep]\n new_values = values[keep]\n # Now detect which \"value\" entries have gone missing\n insert_values, index = np.unique(new_values, return_index=True)\n insert_keys = new_keys[index]\n # Setup indexer\n indexer = xr.DataArray(\n data=np.arange(time.size),\n coords={\"time\": time},\n dims=(\"time\",),\n ).sel(time=insert_values)\n indexer[\"time\"] = insert_keys\n\n # Update the key-value pairs. Discard keys that have been \"promoted\".\n keep = np.in1d(new_keys, insert_keys, assume_unique=True, invert=True)\n new_keys = new_keys[keep]\n new_values = new_values[keep]\n # Set the values to their new source.\n new_values = insert_keys[np.searchsorted(insert_values, new_values)]\n repeat_stress = xr.DataArray(\n data=np.column_stack((new_keys, new_values)),\n dims=(\"repeat\", \"repeat_items\"),\n )\n return indexer, repeat_stress\n\n @staticmethod\n def _clip_time_indexer(\n time,\n time_start,\n time_end,\n ):\n original = xr.DataArray(\n data=np.arange(time.size),\n coords={\"time\": time},\n dims=(\"time\",),\n )\n indexer = original.sel(time=slice(time_start, time_end))\n\n # The selection might return a 0-sized dimension.\n if indexer.size > 0:\n first_time = indexer[\"time\"].values[0]\n else:\n first_time = None\n\n # If the first time matches exactly, xarray will have done thing we\n # wanted and our work with the time dimension is finished.\n if (time_start is not None) and (time_start != first_time):\n # If the first time is before the original time, we need to\n # backfill; otherwise, we need to ffill the first timestamp.\n if time_start < time[0]:\n method = \"bfill\"\n else:\n method = \"ffill\"\n # Index with a list rather than a scalar to preserve the time\n # dimension.\n first = original.sel(time=[time_start], method=method)\n first[\"time\"] = [time_start]\n indexer = xr.concat([first, indexer], dim=\"time\")\n\n return indexer\n\n def __to_datetime(self, time, use_cftime):\n \"\"\"\n Helper function that converts to datetime, except when None.\n \"\"\"\n if time is None:\n return time\n else:\n return imod.wq.timeutil.to_datetime(time, use_cftime)\n\n def clip_box(\n self,\n time_min=None,\n time_max=None,\n layer_min=None,\n layer_max=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None,\n state_for_boundary=None,\n ) -> \"Package\":\n \"\"\"\n Clip a package by a bounding box 
(time, layer, y, x).\n\n Slicing intervals may be half-bounded, by providing None:\n\n * To select 500.0 <= x <= 1000.0:\n ``clip_box(x_min=500.0, x_max=1000.0)``.\n * To select x <= 1000.0: ``clip_box(x_min=None, x_max=1000.0)``\n or ``clip_box(x_max=1000.0)``.\n * To select x >= 500.0: ``clip_box(x_min = 500.0, x_max=None.0)``\n or ``clip_box(x_min=1000.0)``.\n\n Parameters\n ----------\n time_min: optional\n time_max: optional\n layer_min: optional, int\n layer_max: optional, int\n x_min: optional, float\n x_max: optional, float\n y_min: optional, float\n y_max: optional, float\n\n Returns\n -------\n clipped: Package\n \"\"\"\n selection = self.dataset\n if \"time\" in selection:\n time = selection[\"time\"].values\n use_cftime = isinstance(time[0], cftime.datetime)\n time_start = self.__to_datetime(time_min, use_cftime)\n time_end = self.__to_datetime(time_max, use_cftime)\n\n indexer = self._clip_time_indexer(\n time=time,\n time_start=time_start,\n time_end=time_end,\n )\n\n if \"repeat_stress\" in selection.data_vars and self._valid(\n selection[\"repeat_stress\"].values[()]\n ):\n repeat_indexer, repeat_stress = self._clip_repeat_stress(\n repeat_stress=selection[\"repeat_stress\"],\n time=time,\n time_start=time_start,\n time_end=time_end,\n )\n selection = selection.drop_vars(\"repeat_stress\")\n selection[\"repeat_stress\"] = repeat_stress\n indexer = repeat_indexer.combine_first(indexer).astype(int)\n\n selection = selection.drop_vars(\"time\").isel(time=indexer)\n\n if \"layer\" in selection.coords:\n layer_slice = slice(layer_min, layer_max)\n # Cannot select if it's not a dimension!\n if \"layer\" not in selection.dims:\n selection = (\n selection.expand_dims(\"layer\")\n .sel(layer=layer_slice)\n .squeeze(\"layer\")\n )\n else:\n selection = selection.sel(layer=layer_slice)\n\n x_slice = slice(x_min, x_max)\n y_slice = slice(y_min, y_max)\n if isinstance(selection, xu.UgridDataset):\n selection = selection.ugrid.sel(x=x_slice, y=y_slice)\n elif (\"x\" in selection.coords) and (\"y\" in selection.coords):\n if selection.indexes[\"y\"].is_monotonic_decreasing:\n y_slice = slice(y_max, y_min)\n selection = selection.sel(x=x_slice, y=y_slice)\n\n cls = type(self)\n new = cls.__new__(cls)\n new.dataset = selection\n return new\n\n def mask(self, domain: GridDataArray) -> Any:\n \"\"\"\n Mask values outside of domain.\n\n Floating values outside of the condition are set to NaN (nodata).\n Integer values outside of the condition are set to 0 (inactive in\n MODFLOW terms).\n\n Parameters\n ----------\n domain: xr.DataArray of integers. Preservers values where domain is larger than 0.\n\n Returns\n -------\n masked: Package\n The package with part masked.\n \"\"\"\n masked = {}\n for var in self.dataset.data_vars.keys():\n da = self.dataset[var]\n if self.skip_masking_dataarray(var):\n masked[var] = da\n continue\n if set(domain.dims).issubset(da.dims):\n if issubclass(da.dtype.type, numbers.Integral):\n masked[var] = da.where(domain > 0, other=0)\n elif issubclass(da.dtype.type, numbers.Real):\n masked[var] = da.where(domain > 0)\n else:\n raise TypeError(\n f\"Expected dtype float or integer. 
Received instead: {da.dtype}\"\n )\n else:\n if da.values[()] is not None:\n if is_scalar(da.values[()]):\n masked[var] = da.values[()] # For scalars, such as options\n else:\n masked[\n var\n ] = da # For example for arrays with only a layer dimension\n else:\n masked[var] = None\n\n return type(self)(**masked)\n\n def is_regridding_supported(self) -> bool:\n \"\"\"\n returns true if package supports regridding.\n \"\"\"\n return hasattr(self, \"_regrid_method\")\n\n def get_regrid_methods(self) -> Optional[Dict[str, Tuple[RegridderType, str]]]:\n if self.is_regridding_supported():\n return self._regrid_method\n return None\n\n def _regrid_array(\n self,\n varname: str,\n regridder_collection: RegridderInstancesCollection,\n regridder_name: str,\n regridder_function: str,\n target_grid: GridDataArray,\n ) -> Optional[GridDataArray]:\n \"\"\"\n Regrids a data_array. The array is specified by its key in the dataset.\n Each data-array can represent:\n -a scalar value, valid for the whole grid\n -an array of a different scalar per layer\n -an array with a value per grid block\n -None\n \"\"\"\n\n # skip regridding for arrays with no valid values (such as \"None\")\n if not self._valid(self.dataset[varname].values[()]):\n return None\n\n # the dataarray might be a scalar. If it is, then it does not need regridding.\n if is_scalar(self.dataset[varname]):\n return self.dataset[varname].values[()]\n\n if isinstance(self.dataset[varname], xr.DataArray):\n coords = self.dataset[varname].coords\n # if it is an xr.DataArray it may be layer-based; then no regridding is needed\n if not (\"x\" in coords and \"y\" in coords):\n return self.dataset[varname]\n\n # if it is an xr.DataArray it needs the dx, dy coordinates for regridding, which are otherwise not mandatory\n if not (\"dx\" in coords and \"dy\" in coords):\n raise ValueError(\n f\"DataArray {varname} does not have both a dx and dy coordinates\"\n )\n\n # obtain an instance of a regridder for the chosen method\n regridder = regridder_collection.get_regridder(\n regridder_name,\n regridder_function,\n )\n\n # store original dtype of data\n original_dtype = self.dataset[varname].dtype\n\n # regrid data array\n regridded_array = regridder.regrid(self.dataset[varname])\n\n # reconvert the result to the same dtype as the original\n return regridded_array.astype(original_dtype)\n\n def regrid_like(\n self,\n target_grid: GridDataArray,\n regridder_types: Dict[str, Tuple[RegridderType, str]] = None,\n ) -> \"Package\":\n \"\"\"\n Creates a package of the same type as this package, based on another discretization.\n It regrids all the arrays in this package to the desired discretization, and leaves the options\n unmodified. At the moment only regridding to a different planar grid is supported, meaning\n ``target_grid`` has different ``\"x\"`` and ``\"y\"`` or different ``cell2d`` coords.\n\n The regridding methods can be specified in the _regrid_method attribute of the package. These are the defaults\n that specify how each array should be regridded. 
These defaults can be overridden using the input\n parameters of this function.\n\n Examples\n --------\n To regrid the npf package with a non-default method for the k-field, call regrid_like with these arguments:\n\n >>> new_npf = npf.regrid_like(like, {\"k\": (imod.RegridderType.OVERLAP, \"mean\")})\n\n\n Parameters\n ----------\n target_grid: xr.DataArray or xu.UgridDataArray\n a grid defined over the same discretization as the one we want to regrid the package to\n regridder_types: dict(str->(regridder type,str))\n dictionary mapping arraynames (str) to a tuple of regrid type (a specialization class of BaseRegridder) and function name (str)\n this dictionary can be used to override the default mapping method.\n\n Returns\n -------\n a package with the same options as this package, and with all the data-arrays regridded to another discretization,\n similar to the one used in input argument \"target_grid\"\n \"\"\"\n if not self.is_regridding_supported():\n raise NotImplementedError(\n f\"Package {type(self).__name__} does not support regridding\"\n )\n\n regridder_collection = RegridderInstancesCollection(\n self.dataset, target_grid=target_grid\n )\n\n regridder_settings = copy.deepcopy(self._regrid_method)\n if regridder_types is not None:\n regridder_settings.update(regridder_types)\n\n new_package_data = get_non_grid_data(self, list(regridder_settings.keys()))\n\n for (\n varname,\n regridder_type_and_function,\n ) in regridder_settings.items():\n regridder_name, regridder_function = regridder_type_and_function\n\n # skip variables that are not in this dataset\n if varname not in self.dataset.keys():\n continue\n\n # regrid the variable\n new_package_data[varname] = self._regrid_array(\n varname,\n regridder_collection,\n regridder_name,\n regridder_function,\n target_grid,\n )\n\n new_package = self.__class__(**new_package_data)\n\n return new_package\n\n def skip_masking_dataarray(self, array_name: str) -> bool:\n if hasattr(self, \"_skip_mask_arrays\"):\n return array_name in self._skip_mask_arrays\n return False\n\n @classmethod\n def is_grid_agnostic_package(cls) -> bool:\n return False\n\n def __repr__(self) -> str:\n typename = type(self).__name__\n return f\"{typename}\\n{self.dataset.__repr__()}\"\n\n def _repr_html_(self) -> str:\n typename = type(self).__name__\n return f\"<div>{typename}</div>{self.dataset._repr_html_()}\""
},
{
"identifier": "RegridderType",
"path": "imod/mf6/regridding_utils.py",
"snippet": "class RegridderType(Enum):\n \"\"\"\n Enumerator referring to regridder types in ``xugrid``.\n These can be used safely in scripts, remaining backwards compatible for\n when it is decided to rename regridders in ``xugrid``. For an explanation\n what each regridder type does, we refer to the `xugrid documentation <https://deltares.github.io/xugrid/examples/regridder_overview.html>`_\n \"\"\"\n\n CENTROIDLOCATOR = xu.CentroidLocatorRegridder\n BARYCENTRIC = xu.BarycentricInterpolator\n OVERLAP = xu.OverlapRegridder\n RELATIVEOVERLAP = xu.RelativeOverlapRegridder"
},
{
"identifier": "DisBottomSchema",
"path": "imod/mf6/validation.py",
"snippet": "class DisBottomSchema(NoDataComparisonSchema):\n \"\"\"\n Custom schema for the bottoms as these require some additional logic,\n because of how Modflow 6 computes cell thicknesses.\n \"\"\"\n\n def validate(self, obj: xr.DataArray, **kwargs):\n other_obj = kwargs[self.other]\n\n active = self.is_other_notnull(other_obj)\n bottom = obj\n\n # Only check for multi-layered models\n if bottom.coords[\"layer\"].size > 1:\n # Check if zero thicknesses occur in active cells. The difference across\n # layers is a \"negative thickness\"\n thickness = bottom.diff(dim=\"layer\") * -1.0\n if (thickness.where(active.isel(layer=slice(1, None))) <= 0.0).any():\n raise ValidationError(\"found thickness <= 0.0\")\n\n # To compute thicknesses properly, Modflow 6 requires bottom data in the\n # layer above the active cell in question.\n overlaying_top_inactive = np.isnan(bottom).shift(layer=1, fill_value=False)\n if (overlaying_top_inactive & active).any():\n raise ValidationError(\"inactive bottom above active cell\")"
},
{
"identifier": "WriteContext",
"path": "imod/mf6/write_context.py",
"snippet": "class WriteContext:\n \"\"\"\n This class is used in the process of writing modflow inputfiles.\n It is a container for options that are used when writing.\n\n Parameters\n ----------\n simulation_directory: Path\n The directory where the .nam file for the modflow simulation will be written\n use_binary: bool\n If True, bulk data will be written in a binary format readable by modflow. Regular package input files\n will still be rendered as text.\n use_absolute_paths: bool\n If True, paths in the modlfow inputfiles will be rendered as absoule paths on your system.\n This makes the modflow input files less portable to other systems but facilitates reading them by Flopy\n write_directory: Optional[Path] = None\n The directory where the next outputfile will be written. Users do not need to set this parameter. If not provided\n it will be set to the simulation_directrory.\n \"\"\"\n\n def __init__(\n self,\n simulation_directory: Path = \".\",\n use_binary: bool = False,\n use_absolute_paths: bool = False,\n write_directory: Optional[Union[str, Path]] = None,\n ):\n self.__simulation_directory = Path(simulation_directory)\n self.__use_binary = use_binary\n self.__use_absolute_paths = use_absolute_paths\n self.__write_directory = (\n Path(write_directory)\n if write_directory is not None\n else self.__simulation_directory\n )\n self.__is_partitioned = False\n\n def get_formatted_write_directory(self) -> Path:\n \"\"\"\n This method returns a path that is absolute or relative in agreement with the use_absolute_paths setting.\n This is usefull when the path will be written to a modflow input file. If it is not absolute, it will\n be relative to the simulation directory, which makes it usable by MF6.\n \"\"\"\n if self.use_absolute_paths:\n return self.__write_directory\n return Path(relpath(self.write_directory, self.__simulation_directory))\n\n def copy_with_new_write_directory(self, new_write_directory: Path) -> WriteContext:\n new_context = deepcopy(self)\n new_context.__write_directory = Path(new_write_directory)\n return new_context\n\n @property\n def simulation_directory(self) -> Path:\n return self.__simulation_directory\n\n @property\n def use_binary(self) -> bool:\n return self.__use_binary\n\n @use_binary.setter\n def use_binary(self, value) -> None:\n self.__use_binary = value\n\n @property\n def use_absolute_paths(self) -> bool:\n return self.__use_absolute_paths\n\n @property\n def write_directory(self) -> Path:\n return self.__write_directory\n\n @property\n def root_directory(self) -> Path:\n \"\"\"\n returns the simulation directory, or nothing, depending on use_absolute_paths; use this to compose paths\n that are in agreement with the use_absolute_paths setting.\n \"\"\"\n if self.use_absolute_paths:\n return self.__simulation_directory\n else:\n return Path(\"\")\n\n @property\n def is_partitioned(self) -> bool:\n return self.__is_partitioned\n\n @is_partitioned.setter\n def is_partitioned(self, value: bool) -> None:\n self.__is_partitioned = value"
},
{
"identifier": "AllValueSchema",
"path": "imod/schemata.py",
"snippet": "class AllValueSchema(ValueSchema):\n \"\"\"\n Validate whether all values pass a condition.\n\n E.g. if operator is \">\":\n\n assert (values > threshold).all()\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n if isinstance(self.other, str):\n other_obj = kwargs[self.other]\n else:\n other_obj = self.other\n\n if scalar_None(obj) or scalar_None(other_obj):\n return\n\n explicitly_ignored = self.get_explicitly_ignored(kwargs)\n\n ignore = (\n np.isnan(obj) | np.isnan(other_obj) | explicitly_ignored\n ) # ignore nan by setting to True\n\n condition = self.operator(obj, other_obj)\n condition = condition | ignore\n if not condition.all():\n raise ValidationError(\n f\"not all values comply with criterion: {self.operator_str} {self.other}\"\n )"
},
{
"identifier": "AnyValueSchema",
"path": "imod/schemata.py",
"snippet": "class AnyValueSchema(ValueSchema):\n \"\"\"\n Validate whether any value passes a condition.\n\n E.g. if operator is \">\":\n\n assert (values > threshold).any()\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n if isinstance(self.other, str):\n other_obj = kwargs[self.other]\n else:\n other_obj = self.other\n\n if scalar_None(obj) or scalar_None(other_obj):\n return\n\n explicitly_ignored = self.get_explicitly_ignored(kwargs)\n\n ignore = (\n ~np.isnan(obj) | ~np.isnan(other_obj) | explicitly_ignored\n ) # ignore nan by setting to False\n\n condition = self.operator(obj, other_obj)\n condition = condition | ignore\n if not condition.any():\n raise ValidationError(\n f\"not a single value complies with criterion: {self.operator_str} {self.other}\"\n )"
},
{
"identifier": "DimsSchema",
"path": "imod/schemata.py",
"snippet": "class DimsSchema(BaseSchema):\n def __init__(self, *dims: DimsT) -> None:\n self.dims = dims\n\n def _fill_in_face_dim(self, obj: Union[xr.DataArray, xu.UgridDataArray]):\n \"\"\"\n Return dims with a filled in face dim if necessary.\n \"\"\"\n if \"{face_dim}\" in self.dims and isinstance(obj, xu.UgridDataArray):\n return tuple(\n (\n obj.ugrid.grid.face_dimension if i == \"{face_dim}\" else i\n for i in self.dims\n )\n )\n elif \"{edge_dim}\" in self.dims and isinstance(obj, xu.UgridDataArray):\n return tuple(\n (\n obj.ugrid.grid.edge_dimension if i == \"{edge_dim}\" else i\n for i in self.dims\n )\n )\n else:\n return self.dims\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n \"\"\"Validate dimensions\n Parameters\n ----------\n dims : Tuple[Union[str, None]]\n Dimensions of the DataArray. `None` may be used as a wildcard value.\n \"\"\"\n dims = self._fill_in_face_dim(obj)\n # Force to tuple for error message print\n expected = tuple(dims)\n actual = tuple(obj.dims)\n if actual != expected:\n raise ValidationError(f\"dim mismatch: expected {expected}, got {actual}\")"
},
{
"identifier": "DTypeSchema",
"path": "imod/schemata.py",
"snippet": "class DTypeSchema(BaseSchema):\n def __init__(self, dtype: DTypeLike) -> None:\n if dtype in [\n np.floating,\n np.integer,\n np.signedinteger,\n np.unsignedinteger,\n np.generic,\n ]:\n self.dtype = dtype\n else:\n self.dtype = np.dtype(dtype)\n\n def validate(self, obj: xr.DataArray, **kwargs) -> None:\n \"\"\"\n Validate dtype\n\n Parameters\n ----------\n dtype : Any\n Dtype of the DataArray.\n \"\"\"\n if scalar_None(obj):\n return\n\n if not np.issubdtype(obj.dtype, self.dtype):\n raise ValidationError(f\"dtype {obj.dtype} != {self.dtype}\")"
},
{
"identifier": "IdentityNoDataSchema",
"path": "imod/schemata.py",
"snippet": "class IdentityNoDataSchema(NoDataComparisonSchema):\n \"\"\"\n Checks that the NoData values are located at exactly the same locations.\n\n Tests only if if all dimensions of the other object are present in the\n object. So tests if \"stage\" with `{time, layer, y, x}` compared to \"idomain\"\n `{layer, y, x}` but doesn't test if \"k\" with `{layer}` is comperated to\n \"idomain\" `{layer, y, x}`\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n\n # Only test if object has all dimensions in other object.\n missing_dims = set(other_obj.dims) - set(obj.dims)\n\n if len(missing_dims) == 0:\n valid = self.is_notnull(obj)\n other_valid = self.is_other_notnull(other_obj)\n if (valid ^ other_valid).any():\n raise ValidationError(f\"nodata is not aligned with {self.other}\")"
},
{
"identifier": "IndexesSchema",
"path": "imod/schemata.py",
"snippet": "class IndexesSchema(EmptyIndexesSchema):\n \"\"\"\n Verify indexes, check if no dims with zero size are included and that\n indexes are monotonic. Skips unstructured grid dimensions.\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n # Test if indexes all empty\n super().validate(obj)\n\n dims_to_validate = self.get_dims_to_validate(obj)\n\n for dim in dims_to_validate:\n if dim == \"y\":\n if not obj.indexes[dim].is_monotonic_decreasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically decreasing\"\n )\n\n else:\n if not obj.indexes[dim].is_monotonic_increasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically increasing\"\n )"
}
] | import numpy as np
import pandas as pd
from imod.mf6.package import Package
from imod.mf6.regridding_utils import RegridderType
from imod.mf6.validation import DisBottomSchema
from imod.mf6.write_context import WriteContext
from imod.schemata import (
AllValueSchema,
AnyValueSchema,
DimsSchema,
DTypeSchema,
IdentityNoDataSchema,
IndexesSchema,
) | 9,518 |
class VerticesDiscretization(Package):
"""
Discretization by Vertices (DISV).
Parameters
----------
top: array of floats (xu.UgridDataArray)
bottom: array of floats (xu.UgridDataArray)
idomain: array of integers (xu.UgridDataArray)
validate: {True, False}
Flag to indicate whether the package should be validated upon
initialization. This raises a ValidationError if package input is
provided in the wrong manner. Defaults to True.
"""
_pkg_id = "disv"
_init_schemata = {
"top": [
|
class VerticesDiscretization(Package):
"""
Discretization by Vertices (DISV).
Parameters
----------
top: array of floats (xu.UgridDataArray)
bottom: array of floats (xu.UgridDataArray)
idomain: array of integers (xu.UgridDataArray)
validate: {True, False}
Flag to indicate whether the package should be validated upon
initialization. This raises a ValidationError if package input is
provided in the wrong manner. Defaults to True.
"""
_pkg_id = "disv"
_init_schemata = {
"top": [ | DTypeSchema(np.floating), | 7 | 2023-12-08 13:57:59+00:00 | 12k |
Dong142857/Live3DPortrait | models/eg3d/superresolution.py | [
{
"identifier": "Conv2dLayer",
"path": "models/eg3d/networks_stylegan2.py",
"snippet": "class Conv2dLayer(torch.nn.Module):\n def __init__(self,\n in_channels, # Number of input channels.\n out_channels, # Number of output channels.\n kernel_size, # Width and height of the convolution kernel.\n bias = True, # Apply additive bias before the activation function?\n activation = 'linear', # Activation function: 'relu', 'lrelu', etc.\n up = 1, # Integer upsampling factor.\n down = 1, # Integer downsampling factor.\n resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.\n conv_clamp = None, # Clamp the output to +-X, None = disable clamping.\n channels_last = False, # Expect the input to have memory_format=channels_last?\n trainable = True, # Update the weights of this layer during training?\n ):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.activation = activation\n self.up = up\n self.down = down\n self.conv_clamp = conv_clamp\n self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))\n self.padding = kernel_size // 2\n self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))\n self.act_gain = bias_act.activation_funcs[activation].def_gain\n\n memory_format = torch.channels_last if channels_last else torch.contiguous_format\n weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)\n bias = torch.zeros([out_channels]) if bias else None\n if trainable:\n self.weight = torch.nn.Parameter(weight)\n self.bias = torch.nn.Parameter(bias) if bias is not None else None\n else:\n self.register_buffer('weight', weight)\n if bias is not None:\n self.register_buffer('bias', bias)\n else:\n self.bias = None\n\n def forward(self, x, gain=1):\n w = self.weight * self.weight_gain\n b = self.bias.to(x.dtype) if self.bias is not None else None\n flip_weight = (self.up == 1) # slightly faster\n x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)\n\n act_gain = self.act_gain * gain\n act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None\n x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)\n return x\n\n def extra_repr(self):\n return ' '.join([\n f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',\n f'up={self.up}, down={self.down}'])"
},
{
"identifier": "SynthesisLayer",
"path": "models/eg3d/networks_stylegan2.py",
"snippet": "class SynthesisLayer(torch.nn.Module):\n def __init__(self,\n in_channels, # Number of input channels.\n out_channels, # Number of output channels.\n w_dim, # Intermediate latent (W) dimensionality.\n resolution, # Resolution of this layer.\n kernel_size = 3, # Convolution kernel size.\n up = 1, # Integer upsampling factor.\n use_noise = True, # Enable noise input?\n activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.\n resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.\n conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.\n channels_last = False, # Use channels_last format for the weights?\n ):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.w_dim = w_dim\n self.resolution = resolution\n self.up = up\n self.use_noise = use_noise\n self.activation = activation\n self.conv_clamp = conv_clamp\n self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))\n self.padding = kernel_size // 2\n self.act_gain = bias_act.activation_funcs[activation].def_gain\n\n self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)\n memory_format = torch.channels_last if channels_last else torch.contiguous_format\n self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))\n if use_noise:\n self.register_buffer('noise_const', torch.randn([resolution, resolution]))\n self.noise_strength = torch.nn.Parameter(torch.zeros([]))\n self.bias = torch.nn.Parameter(torch.zeros([out_channels]))\n\n def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):\n assert noise_mode in ['random', 'const', 'none']\n in_resolution = self.resolution // self.up\n misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution])\n styles = self.affine(w)\n\n noise = None\n if self.use_noise and noise_mode == 'random':\n noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength\n if self.use_noise and noise_mode == 'const':\n noise = self.noise_const * self.noise_strength\n\n flip_weight = (self.up == 1) # slightly faster\n x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,\n padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)\n\n act_gain = self.act_gain * gain\n act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None\n x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)\n return x\n\n def extra_repr(self):\n return ' '.join([\n f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',\n f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])"
},
{
"identifier": "ToRGBLayer",
"path": "models/eg3d/networks_stylegan2.py",
"snippet": "class ToRGBLayer(torch.nn.Module):\n def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.w_dim = w_dim\n self.conv_clamp = conv_clamp\n self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)\n memory_format = torch.channels_last if channels_last else torch.contiguous_format\n self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))\n self.bias = torch.nn.Parameter(torch.zeros([out_channels]))\n self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))\n\n def forward(self, x, w, fused_modconv=True):\n styles = self.affine(w) * self.weight_gain\n x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)\n x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)\n return x\n\n def extra_repr(self):\n return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'"
},
{
"identifier": "upfirdn2d",
"path": "torch_utils/ops/upfirdn2d.py",
"snippet": "def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Pad, upsample, filter, and downsample a batch of 2D images.\n\n Performs the following sequence of operations for each channel:\n\n 1. Upsample the image by inserting N-1 zeros after each pixel (`up`).\n\n 2. Pad the image with the specified number of zeros on each side (`padding`).\n Negative padding corresponds to cropping the image.\n\n 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it\n so that the footprint of all output pixels lies within the input image.\n\n 4. Downsample the image by keeping every Nth pixel (`down`).\n\n This sequence of operations bears close resemblance to scipy.signal.upfirdn().\n The fused op is considerably more efficient than performing the same calculation\n using standard PyTorch ops. It supports gradients of arbitrary order.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n up: Integer upsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n down: Integer downsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n padding: Padding with respect to the upsampled image. Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n assert isinstance(x, torch.Tensor)\n assert impl in ['ref', 'cuda']\n if impl == 'cuda' and x.device.type == 'cuda' and _init():\n return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)\n return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)"
},
{
"identifier": "persistence",
"path": "torch_utils/persistence.py",
"snippet": "def persistent_class(orig_class):\n def __init__(self, *args, **kwargs):\n def init_args(self):\n def init_kwargs(self):\n def __reduce__(self):\ndef is_persistent(obj):\ndef import_hook(hook):\ndef _reconstruct_persistent_obj(meta):\ndef _module_to_src(module):\ndef _src_to_module(src):\ndef _check_pickleable(obj):\n def recurse(obj):\n class Decorator(orig_class):"
},
{
"identifier": "misc",
"path": "torch_utils/misc.py",
"snippet": "def constant(value, shape=None, dtype=None, device=None, memory_format=None):\n def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin\ndef suppress_tracer_warnings():\ndef assert_shape(tensor, ref_shape):\ndef profiled_function(fn):\n def decorator(*args, **kwargs):\n def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):\n def __iter__(self):\ndef params_and_buffers(module):\ndef named_params_and_buffers(module):\ndef copy_params_and_buffers(src_module, dst_module, require_all=False):\ndef ddp_sync(module, sync):\ndef check_ddp_consistency(module, ignore_regex=None):\ndef print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):\n def pre_hook(_mod, _inputs):\n def post_hook(mod, _inputs, outputs):\nclass InfiniteSampler(torch.utils.data.Sampler):"
},
{
"identifier": "SynthesisBlock",
"path": "models/eg3d/networks_stylegan2.py",
"snippet": "class SynthesisBlock(torch.nn.Module):\n def __init__(self,\n in_channels, # Number of input channels, 0 = first block.\n out_channels, # Number of output channels.\n w_dim, # Intermediate latent (W) dimensionality.\n resolution, # Resolution of this block.\n img_channels, # Number of output color channels.\n is_last, # Is this the last block?\n architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.\n resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.\n conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.\n use_fp16 = False, # Use FP16 for this block?\n fp16_channels_last = False, # Use channels-last memory format with FP16?\n fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training.\n **layer_kwargs, # Arguments for SynthesisLayer.\n ):\n assert architecture in ['orig', 'skip', 'resnet']\n super().__init__()\n self.in_channels = in_channels\n self.w_dim = w_dim\n self.resolution = resolution\n self.img_channels = img_channels\n self.is_last = is_last\n self.architecture = architecture\n self.use_fp16 = use_fp16\n self.channels_last = (use_fp16 and fp16_channels_last)\n self.fused_modconv_default = fused_modconv_default\n self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))\n self.num_conv = 0\n self.num_torgb = 0\n\n if in_channels == 0:\n self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))\n\n if in_channels != 0:\n self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,\n resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)\n self.num_conv += 1\n\n self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,\n conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)\n self.num_conv += 1\n\n if is_last or architecture == 'skip':\n self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,\n conv_clamp=conv_clamp, channels_last=self.channels_last)\n self.num_torgb += 1\n\n if in_channels != 0 and architecture == 'resnet':\n self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,\n resample_filter=resample_filter, channels_last=self.channels_last)\n\n def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):\n _ = update_emas # unused\n misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])\n w_iter = iter(ws.unbind(dim=1))\n if ws.device.type != 'cuda':\n force_fp32 = True\n dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32\n memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format\n if fused_modconv is None:\n fused_modconv = self.fused_modconv_default\n if fused_modconv == 'inference_only':\n fused_modconv = (not self.training)\n\n # Input.\n if self.in_channels == 0:\n x = self.const.to(dtype=dtype, memory_format=memory_format)\n x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])\n else:\n misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])\n x = x.to(dtype=dtype, memory_format=memory_format)\n\n # Main layers.\n if self.in_channels == 0:\n x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)\n elif self.architecture == 'resnet':\n y = self.skip(x, gain=np.sqrt(0.5))\n x = self.conv0(x, 
next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)\n x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)\n x = y.add_(x)\n else:\n x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)\n x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)\n\n # ToRGB.\n if img is not None:\n misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])\n img = upfirdn2d.upsample2d(img, self.resample_filter)\n if self.is_last or self.architecture == 'skip':\n y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)\n y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)\n img = img.add_(y) if img is not None else y\n\n assert x.dtype == dtype\n assert img is None or img.dtype == torch.float32\n return x, img\n\n def extra_repr(self):\n return f'resolution={self.resolution:d}, architecture={self.architecture:s}'"
},
{
"identifier": "SynthesisLayer",
"path": "models/eg3d/networks_stylegan3.py",
"snippet": "class SynthesisLayer(torch.nn.Module):\n def __init__(self,\n w_dim, # Intermediate latent (W) dimensionality.\n is_torgb, # Is this the final ToRGB layer?\n is_critically_sampled, # Does this layer use critical sampling?\n use_fp16, # Does this layer use FP16?\n\n # Input & output specifications.\n in_channels, # Number of input channels.\n out_channels, # Number of output channels.\n in_size, # Input spatial size: int or [width, height].\n out_size, # Output spatial size: int or [width, height].\n in_sampling_rate, # Input sampling rate (s).\n out_sampling_rate, # Output sampling rate (s).\n in_cutoff, # Input cutoff frequency (f_c).\n out_cutoff, # Output cutoff frequency (f_c).\n in_half_width, # Input transition band half-width (f_h).\n out_half_width, # Output Transition band half-width (f_h).\n\n # Hyperparameters.\n conv_kernel = 3, # Convolution kernel size. Ignored for final the ToRGB layer.\n filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling.\n lrelu_upsampling = 2, # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer.\n use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers.\n conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping.\n magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes.\n ):\n super().__init__()\n self.w_dim = w_dim\n self.is_torgb = is_torgb\n self.is_critically_sampled = is_critically_sampled\n self.use_fp16 = use_fp16\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.in_size = np.broadcast_to(np.asarray(in_size), [2])\n self.out_size = np.broadcast_to(np.asarray(out_size), [2])\n self.in_sampling_rate = in_sampling_rate\n self.out_sampling_rate = out_sampling_rate\n self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)\n self.in_cutoff = in_cutoff\n self.out_cutoff = out_cutoff\n self.in_half_width = in_half_width\n self.out_half_width = out_half_width\n self.conv_kernel = 1 if is_torgb else conv_kernel\n self.conv_clamp = conv_clamp\n self.magnitude_ema_beta = magnitude_ema_beta\n\n # Setup parameters and buffers.\n self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1)\n self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))\n self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))\n self.register_buffer('magnitude_ema', torch.ones([]))\n\n # Design upsampling filter.\n self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate))\n assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate\n self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1\n self.register_buffer('up_filter', self.design_lowpass_filter(\n numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))\n\n # Design downsampling filter.\n self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate))\n assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate\n self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1\n self.down_radial = use_radial_filters and not self.is_critically_sampled\n self.register_buffer('down_filter', self.design_lowpass_filter(\n numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, 
fs=self.tmp_sampling_rate, radial=self.down_radial))\n\n # Compute padding.\n pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling.\n pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling.\n pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters.\n pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3).\n pad_hi = pad_total - pad_lo\n self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])]\n\n def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):\n assert noise_mode in ['random', 'const', 'none'] # unused\n misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])])\n misc.assert_shape(w, [x.shape[0], self.w_dim])\n\n # Track input magnitude.\n if update_emas:\n with torch.autograd.profiler.record_function('update_magnitude_ema'):\n magnitude_cur = x.detach().to(torch.float32).square().mean()\n self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta))\n input_gain = self.magnitude_ema.rsqrt()\n\n # Execute affine layer.\n styles = self.affine(w)\n if self.is_torgb:\n weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2))\n styles = styles * weight_gain\n\n # Execute modulated conv2d.\n dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32\n x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,\n padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)\n\n # Execute bias, filtered leaky ReLU, and clamping.\n gain = 1 if self.is_torgb else np.sqrt(2)\n slope = 1 if self.is_torgb else 0.2\n x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),\n up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)\n\n # Ensure correct shape and dtype.\n misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])])\n assert x.dtype == dtype\n return x\n\n @staticmethod\n def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):\n assert numtaps >= 1\n\n # Identity filter.\n if numtaps == 1:\n return None\n\n # Separable Kaiser low-pass filter.\n if not radial:\n f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)\n return torch.as_tensor(f, dtype=torch.float32)\n\n # Radially symmetric jinc-based filter.\n x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs\n r = np.hypot(*np.meshgrid(x, x))\n f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)\n beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))\n w = np.kaiser(numtaps, beta)\n f *= np.outer(w, w)\n f /= np.sum(f)\n return torch.as_tensor(f, dtype=torch.float32)\n\n def extra_repr(self):\n return '\\n'.join([\n f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',\n f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',\n f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',\n f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',\n f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',\n f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',\n f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])"
}
] | import torch
import numpy as np
from models.eg3d.networks_stylegan2 import Conv2dLayer, SynthesisLayer, ToRGBLayer
from torch_utils.ops import upfirdn2d
from torch_utils import persistence
from torch_utils import misc
from models.eg3d.networks_stylegan2 import SynthesisBlock
from models.eg3d.networks_stylegan3 import SynthesisLayer as AFSynthesisLayer | 8,988 | x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
#----------------------------------------------------------------------------
# for 128 x 128 generation
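# (block0 keeps the 64x64 input resolution; block1 performs the single 2x
# upsampling step to 128x128)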
@persistence.persistent_class
class SuperresolutionHybrid2X(torch.nn.Module):
def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
**block_kwargs):
super().__init__()
assert img_resolution == 128
use_fp16 = sr_num_fp16_res > 0
self.input_resolution = 64
self.sr_antialias = sr_antialias
self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=64,
img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=128,
img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
def forward(self, rgb, x, ws, **block_kwargs):
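        # Each SR block consumes num_conv + num_torgb = 3 style vectors, so the
        # final w is tiled three times and shared by both blocks.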
ws = ws[:, -1:, :].repeat(1, 3, 1)
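        # Resample the feature image and raw RGB to the block input resolution
        # if needed (bilinear, optionally antialiased).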
if x.shape[-1] != self.input_resolution:
x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
x, rgb = self.block0(x, rgb, ws, **block_kwargs)
x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
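# Illustrative usage sketch (not part of the original file; the channel count,
# number of ws and noise_mode below are assumed values):
#   sr = SuperresolutionHybrid2X(channels=32, img_resolution=128,
#                                sr_num_fp16_res=0, sr_antialias=True)
#   rgb = torch.randn(1, 3, 64, 64)    # raw RGB from the neural renderer
#   feat = torch.randn(1, 32, 64, 64)  # feature image, matching `channels`
#   ws = torch.randn(1, 14, 512)       # only the last w vector is used
#   out = sr(rgb, feat, ws, noise_mode='const')  # -> (1, 3, 128, 128)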
#----------------------------------------------------------------------------
# TODO: Delete (here for backwards compatibility with old 256x256 models)
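# Differs from the SuperresolutionHybrid*X classes: it resamples only when the
# input is smaller than 128x128 and does so without antialiasing.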
@persistence.persistent_class
class SuperresolutionHybridDeepfp32(torch.nn.Module):
def __init__(self, channels, img_resolution, sr_num_fp16_res,
num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
**block_kwargs):
super().__init__()
assert img_resolution == 256
use_fp16 = sr_num_fp16_res > 0
self.input_resolution = 128
self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128,
img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256,
img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
def forward(self, rgb, x, ws, **block_kwargs):
ws = ws[:, -1:, :].repeat(1, 3, 1)
if x.shape[-1] < self.input_resolution:
x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False)
rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False)
x, rgb = self.block0(x, rgb, ws, **block_kwargs)
x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
#----------------------------------------------------------------------------
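# Standard StyleGAN2 synthesis block with the 2x upsampling removed; used as
# the first superresolution block when the incoming feature resolution should
# be kept unchanged.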
@persistence.persistent_class
class SynthesisBlockNoUp(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels, 0 = first block.
out_channels, # Number of output channels.
w_dim, # Intermediate latent (W) dimensionality.
resolution, # Resolution of this block.
img_channels, # Number of output color channels.
is_last, # Is this the last block?
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.
use_fp16 = False, # Use FP16 for this block?
fp16_channels_last = False, # Use channels-last memory format with FP16?
fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
**layer_kwargs, # Arguments for SynthesisLayer.
):
assert architecture in ['orig', 'skip', 'resnet']
super().__init__()
self.in_channels = in_channels
self.w_dim = w_dim
self.resolution = resolution
self.img_channels = img_channels
self.is_last = is_last
self.architecture = architecture
self.use_fp16 = use_fp16
self.channels_last = (use_fp16 and fp16_channels_last)
self.fused_modconv_default = fused_modconv_default
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.num_conv = 0
self.num_torgb = 0
if in_channels == 0:
self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
if in_channels != 0:
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution,
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
if is_last or architecture == 'skip':
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
conv_clamp=conv_clamp, channels_last=self.channels_last)
self.num_torgb += 1
if in_channels != 0 and architecture == 'resnet':
| # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Superresolution network architectures from the paper
"Efficient Geometry-aware 3D Generative Adversarial Networks"."""
#----------------------------------------------------------------------------
# for 512x512 generation
@persistence.persistent_class
class SuperresolutionHybrid8X(torch.nn.Module):
def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
**block_kwargs):
super().__init__()
assert img_resolution == 512
use_fp16 = sr_num_fp16_res > 0
self.input_resolution = 128
self.sr_antialias = sr_antialias
self.block0 = SynthesisBlock(channels, 128, w_dim=512, resolution=256,
img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=512,
img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
def forward(self, rgb, x, ws, **block_kwargs):
ws = ws[:, -1:, :].repeat(1, 3, 1)
if x.shape[-1] != self.input_resolution:
x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
x, rgb = self.block0(x, rgb, ws, **block_kwargs)
x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
#----------------------------------------------------------------------------
# for 256x256 generation
@persistence.persistent_class
class SuperresolutionHybrid4X(torch.nn.Module):
def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
**block_kwargs):
super().__init__()
assert img_resolution == 256
use_fp16 = sr_num_fp16_res > 0
self.sr_antialias = sr_antialias
self.input_resolution = 128
self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128,
img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256,
img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
def forward(self, rgb, x, ws, **block_kwargs):
ws = ws[:, -1:, :].repeat(1, 3, 1)
if x.shape[-1] < self.input_resolution:
x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
x, rgb = self.block0(x, rgb, ws, **block_kwargs)
x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
#----------------------------------------------------------------------------
# for 128 x 128 generation
@persistence.persistent_class
class SuperresolutionHybrid2X(torch.nn.Module):
def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
**block_kwargs):
super().__init__()
assert img_resolution == 128
use_fp16 = sr_num_fp16_res > 0
self.input_resolution = 64
self.sr_antialias = sr_antialias
self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=64,
img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=128,
img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
def forward(self, rgb, x, ws, **block_kwargs):
ws = ws[:, -1:, :].repeat(1, 3, 1)
if x.shape[-1] != self.input_resolution:
x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False, antialias=self.sr_antialias)
x, rgb = self.block0(x, rgb, ws, **block_kwargs)
x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
#----------------------------------------------------------------------------
# TODO: Delete (here for backwards compatibility with old 256x256 models)
@persistence.persistent_class
class SuperresolutionHybridDeepfp32(torch.nn.Module):
def __init__(self, channels, img_resolution, sr_num_fp16_res,
num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
**block_kwargs):
super().__init__()
assert img_resolution == 256
use_fp16 = sr_num_fp16_res > 0
self.input_resolution = 128
self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128,
img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256,
img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
def forward(self, rgb, x, ws, **block_kwargs):
ws = ws[:, -1:, :].repeat(1, 3, 1)
if x.shape[-1] < self.input_resolution:
x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False)
rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
mode='bilinear', align_corners=False)
x, rgb = self.block0(x, rgb, ws, **block_kwargs)
x, rgb = self.block1(x, rgb, ws, **block_kwargs)
return rgb
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlockNoUp(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels, 0 = first block.
out_channels, # Number of output channels.
w_dim, # Intermediate latent (W) dimensionality.
resolution, # Resolution of this block.
img_channels, # Number of output color channels.
is_last, # Is this the last block?
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.
use_fp16 = False, # Use FP16 for this block?
fp16_channels_last = False, # Use channels-last memory format with FP16?
fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
**layer_kwargs, # Arguments for SynthesisLayer.
):
assert architecture in ['orig', 'skip', 'resnet']
super().__init__()
self.in_channels = in_channels
self.w_dim = w_dim
self.resolution = resolution
self.img_channels = img_channels
self.is_last = is_last
self.architecture = architecture
self.use_fp16 = use_fp16
self.channels_last = (use_fp16 and fp16_channels_last)
self.fused_modconv_default = fused_modconv_default
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.num_conv = 0
self.num_torgb = 0
if in_channels == 0:
self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
if in_channels != 0:
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution,
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
if is_last or architecture == 'skip':
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
conv_clamp=conv_clamp, channels_last=self.channels_last)
self.num_torgb += 1
if in_channels != 0 and architecture == 'resnet': | self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, | 0 | 2023-12-09 15:18:53+00:00 | 12k |
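The superresolution records above all share the same dataflow: interpolate the raw neural-rendering RGB and feature image up to the module's input_resolution, then run a non-upsampling block followed by a 2x-upsampling block, broadcasting only the last w vector. Below is a minimal, self-contained PyTorch sketch of that dataflow; ToyHybridSR and its placeholder convolution blocks are illustrative assumptions, not the real SynthesisBlockNoUp/SynthesisBlock from the EG3D codebase.

# Minimal sketch of the superresolution dataflow (placeholder blocks only).
import torch
import torch.nn.functional as F

class ToyHybridSR(torch.nn.Module):
    def __init__(self, channels, input_resolution=128):
        super().__init__()
        self.input_resolution = input_resolution
        # Stand-ins for block0 (keeps resolution) and block1 (doubles resolution).
        self.block0 = torch.nn.Conv2d(channels, 128, kernel_size=3, padding=1)
        self.block1 = torch.nn.Sequential(
            torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            torch.nn.Conv2d(128, 3, kernel_size=3, padding=1),
        )

    def forward(self, rgb, x, ws):
        # The real modules reuse only the last w and broadcast it to 3 layers;
        # the placeholder blocks here do not consume it.
        ws = ws[:, -1:, :].repeat(1, 3, 1)
        if x.shape[-1] != self.input_resolution:
            size = (self.input_resolution, self.input_resolution)
            x = F.interpolate(x, size=size, mode='bilinear', align_corners=False)
            rgb = F.interpolate(rgb, size=size, mode='bilinear', align_corners=False)
        x = self.block0(x)      # stays at input_resolution
        rgb = self.block1(x)    # upsampled to 2 * input_resolution
        return rgb

rgb = torch.randn(1, 3, 64, 64)   # raw neural-rendering RGB
x = torch.randn(1, 32, 64, 64)    # raw feature image
ws = torch.randn(1, 14, 512)      # latent codes; only ws[:, -1] is used
out = ToyHybridSR(channels=32)(rgb, x, ws)
print(out.shape)                  # torch.Size([1, 3, 256, 256])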
blaise-tk/RVC_CLI | rvc/infer/infer.py | [
{
"identifier": "load_audio",
"path": "rvc/lib/utils.py",
"snippet": "def load_audio(file, sampling_rate):\n try:\n file = file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n out, _ = (\n ffmpeg.input(file, threads=0)\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sampling_rate)\n .run(cmd=[\"ffmpeg\", \"-nostdin\"], capture_stdout=True, capture_stderr=True)\n )\n except Exception as error:\n raise RuntimeError(f\"Failed to load audio: {error}\")\n\n return np.frombuffer(out, np.float32).flatten()"
},
{
"identifier": "SynthesizerTrnMs256NSFsid",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n pitchf: torch.Tensor,\n y: torch.Tensor,\n y_lengths: torch.Tensor,\n ds: Optional[torch.Tensor] = None,\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, 
y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n assert isinstance(rate, torch.Tensor)\n head = int(z_p.shape[2] * (1 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs256NSFsid_nono",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n 
@torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n 
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid_nono",
"path": "rvc/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n 
@torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "Config",
"path": "rvc/configs/config.py",
"snippet": "class Config:\n def __init__(self):\n self.device = \"cuda:0\"\n self.is_half = True\n self.use_jit = False\n self.n_cpu = 0\n self.gpu_name = None\n self.json_config = self.load_config_json()\n self.gpu_mem = None\n self.instead = \"\"\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n @staticmethod\n def load_config_json() -> dict:\n d = {}\n for config_file in version_config_list:\n with open(f\"rvc/configs/{config_file}\", \"r\") as f:\n d[config_file] = json.load(f)\n return d\n\n @staticmethod\n def has_mps() -> bool:\n if not torch.backends.mps.is_available():\n return False\n try:\n torch.zeros(1).to(torch.device(\"mps\"))\n return True\n except Exception:\n return False\n\n @staticmethod\n def has_xpu() -> bool:\n if hasattr(torch, \"xpu\") and torch.xpu.is_available():\n return True\n else:\n return False\n\n def use_fp32_config(self):\n for config_file in version_config_list:\n self.json_config[config_file][\"train\"][\"fp16_run\"] = False\n with open(f\"rvc/configs/{config_file}\", \"r\") as f:\n strr = f.read().replace(\"true\", \"false\")\n with open(f\"rvc/configs/{config_file}\", \"w\") as f:\n f.write(strr)\n with open(\"rvc/train/preprocess/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(\"rvc/train/preprocess/preprocess.py\", \"w\") as f:\n f.write(strr)\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n i_device = int(self.device.split(\":\")[-1])\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n self.is_half = False\n self.use_fp32_config()\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n if self.gpu_mem <= 4:\n with open(\"rvc/train/preprocess/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(\"rvc/train/preprocess/preprocess.py\", \"w\") as f:\n f.write(strr)\n elif self.has_mps():\n print(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n else:\n print(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if self.n_cpu == 0:\n self.n_cpu = cpu_count()\n\n if self.is_half:\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n if self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n\n return x_pad, x_query, x_center, x_max"
}
] | import os
import sys
import torch
import numpy as np
import soundfile as sf
from vc_infer_pipeline import VC
from rvc.lib.utils import load_audio
from fairseq import checkpoint_utils
from rvc.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc.configs.config import Config | 7,334 |
config = Config()
torch.manual_seed(114514)
hubert_model = None
def load_hubert():
global hubert_model
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
["hubert_base.pt"],
suffix="",
)
hubert_model = models[0]
hubert_model = hubert_model.to(config.device)
if config.is_half:
hubert_model = hubert_model.half()
else:
hubert_model = hubert_model.float()
hubert_model.eval()
def vc_single(
sid=0,
input_audio_path=None,
f0_up_key=None,
f0_file=None,
f0_method=None,
file_index=None,
index_rate=None,
resample_sr=0,
rms_mix_rate=1,
protect=0.33,
hop_length=None,
output_path=None,
):
global tgt_sr, net_g, vc, hubert_model, version
if input_audio_path is None:
return "Please, load an audio!", None
f0_up_key = int(f0_up_key)
try:
|
config = Config()
torch.manual_seed(114514)
hubert_model = None
def load_hubert():
global hubert_model
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
["hubert_base.pt"],
suffix="",
)
hubert_model = models[0]
hubert_model = hubert_model.to(config.device)
if config.is_half:
hubert_model = hubert_model.half()
else:
hubert_model = hubert_model.float()
hubert_model.eval()
def vc_single(
sid=0,
input_audio_path=None,
f0_up_key=None,
f0_file=None,
f0_method=None,
file_index=None,
index_rate=None,
resample_sr=0,
rms_mix_rate=1,
protect=0.33,
hop_length=None,
output_path=None,
):
global tgt_sr, net_g, vc, hubert_model, version
if input_audio_path is None:
return "Please, load an audio!", None
f0_up_key = int(f0_up_key)
try: | audio = load_audio(input_audio_path, 16000) | 0 | 2023-12-10 21:09:41+00:00 | 12k |
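load_hubert() in the record above follows a lazy, load-once pattern: a module-level cache, a move to the configured device, a half/float cast, and eval() for inference. The following is a hedged sketch of that pattern with a generic torch module standing in for the fairseq HuBERT checkpoint; _feature_model, get_feature_model and the placeholder Linear layer are hypothetical names used only for illustration.

# Sketch of the lazy, load-once model pattern used by load_hubert().
import torch

_feature_model = None  # hypothetical module-level cache, mirroring hubert_model

def get_feature_model(device="cpu", use_half=False):
    global _feature_model
    if _feature_model is None:
        model = torch.nn.Linear(16000, 256)  # placeholder for the loaded checkpoint
        model = model.to(device)
        model = model.half() if use_half else model.float()
        model.eval()                         # inference only, as in load_hubert()
        _feature_model = model
    return _feature_model

with torch.no_grad():
    feats = get_feature_model()(torch.randn(1, 16000))
print(feats.shape)  # torch.Size([1, 256])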
Opt-Mucca/PySCIPOpt-ML | src/pyscipopt_ml/modelling/gradient_boosting/aggregate_tree_model.py | [
{
"identifier": "add_decision_tree_classifier_constr",
"path": "src/pyscipopt_ml/sklearn/decision_tree.py",
"snippet": "def add_decision_tree_classifier_constr(\n scip_model,\n decision_tree_classifier,\n input_vars,\n output_vars=None,\n unique_naming_prefix=\"\",\n epsilon=0.0,\n **kwargs,\n):\n \"\"\"Formulate decision_tree_classifier into a SCIP Model.\n\n The formulation predicts the values of output_vars using input_vars\n according to decision_tree_classifier.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n decision_tree_classifier : :external+sklearn:py:class:`sklearn.tree.DecisionTreeClassifier`\n The decision tree classifier to insert as predictor.\n input_vars : list or np.ndarray\n Decision variables used as input for decision tree in model.\n output_vars : list or np.ndarray, optional\n Decision variables used as output for decision tree in model.\n unique_naming_prefix : str, optional\n A unique naming prefix that is used before all variable and constraint names. This parameter is important if\n the SCIP model is later printed to file and many predictors are added to the same SCIP model.\n epsilon : float, optional\n Small value used to impose strict inequalities for splitting nodes in\n MIP formulations.\n Returns\n -------\n DecisionTreeClassifierConstr\n Object containing information about what was added to scip_model to formulate decision_tree_classifier\n\n Note\n ----\n\n |VariablesDimensionsWarn|\n\n Warning\n -------\n\n Although decision trees with multiple outputs are tested they were never\n used in a non-trivial optimization model. It should be used with care at\n this point.\n \"\"\"\n return DecisionTreeConstr(\n scip_model,\n decision_tree_classifier,\n input_vars,\n output_vars,\n unique_naming_prefix,\n epsilon,\n True,\n **kwargs,\n )"
},
{
"identifier": "add_decision_tree_regressor_constr",
"path": "src/pyscipopt_ml/sklearn/decision_tree.py",
"snippet": "def add_decision_tree_regressor_constr(\n scip_model,\n decision_tree_regressor,\n input_vars,\n output_vars=None,\n unique_naming_prefix=\"\",\n epsilon=0.0,\n **kwargs,\n):\n \"\"\"Formulate decision_tree_regressor into a SCIP Model.\n\n The formulation predicts the values of output_vars using input_vars\n according to decision_tree_regressor.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n decision_tree_regressor : :external+sklearn:py:class:`sklearn.tree.DecisionTreeRegressor`\n The decision tree regressor to insert as predictor.\n input_vars : list or np.ndarray\n Decision variables used as input for decision tree in model.\n output_vars : list or np.ndarray, optional\n Decision variables used as output for decision tree in model.\n unique_naming_prefix : str, optional\n A unique naming prefix that is used before all variable and constraint names. This parameter is important if\n the SCIP model is later printed to file and many predictors are added to the same SCIP model.\n epsilon : float, optional\n Small value used to impose strict inequalities for splitting nodes in\n MIP formulations.\n Returns\n -------\n DecisionTreeRegressorConstr\n Object containing information about what was added to scip_model to formulate decision_tree_regressor\n\n Note\n ----\n\n |VariablesDimensionsWarn|\n \"\"\"\n return DecisionTreeConstr(\n scip_model,\n decision_tree_regressor,\n input_vars,\n output_vars,\n unique_naming_prefix,\n epsilon,\n False,\n **kwargs,\n )"
},
{
"identifier": "AbstractPredictorConstr",
"path": "src/pyscipopt_ml/modelling/base_predictor_constraint.py",
"snippet": "class AbstractPredictorConstr(ABC):\n \"\"\"Base class to store all information of embedded ML model by :py:func`pyscipopt_ml.add_predictor_constr`.\n\n This class is the base class to store everything that is added to\n a SCIP model when a trained predictor is inserted into it. Depending on\n the type of the predictor, a class derived from it will be returned\n by :py:func:`pyscipopt_ml.add_predictor_constr`.\n\n Warning\n -------\n\n Users should usually never construct objects of this class or one of its derived\n classes. They are returned by the :py:func:`pyscipopt_ml.add_predictor_constr` and\n other functions.\n \"\"\"\n\n def __init__(\n self, scip_model, input_vars, output_vars=None, unique_naming_prefix=\"\", **kwargs\n ):\n self.scip_model = scip_model\n self.unique_naming_prefix = unique_naming_prefix\n self._validate(input_vars, output_vars)\n self._created_vars = []\n self._created_cons = []\n self._build_predictor_model(**kwargs)\n\n def _validate(self, input_vars, output_vars=None):\n \"\"\"Validate input and output variables (check shapes, reshape if needed).\"\"\"\n\n # Ensure the correct type of input and output is given\n if type(input_vars) not in [list, np.ndarray]:\n raise ParameterError(\n f\"Input variables are not type list or np.ndarray. They are type {type(input_vars)}.\"\n )\n if output_vars is not None:\n if not isinstance(output_vars, list) and not isinstance(output_vars, np.ndarray):\n raise ParameterError(\n f\"Output variables are not type list or np.ndarray. They are type {type(output_vars)}.\"\n )\n\n # Transform the type list to type np.ndarray\n if isinstance(input_vars, list):\n input_vars = np.array(input_vars, dtype=object)\n if isinstance(output_vars, list):\n output_vars = np.array(output_vars, dtype=object)\n\n # Change the dimension of the input variables if needed. 
(Always want number of data points first)\n if input_vars.ndim == 1:\n input_vars = input_vars.reshape((1, -1))\n if input_vars.ndim >= 3:\n input_vars = input_vars.reshape((input_vars.shape[0], -1))\n\n # In the case of the output being None, create the appropriate output variables here\n if output_vars is None:\n output_vars = self._create_output_vars(input_vars)\n\n # Change the dimensions of the output variables if needed (Always want the number of data points first)\n if output_vars.ndim == 1:\n if input_vars.shape[0] == 1:\n output_vars = output_vars.reshape((1, -1))\n else:\n output_vars = output_vars.reshape((-1, 1))\n\n # Ensure that the variable dimensions match that of the predictor\n if hasattr(self, \"input_size\") and input_vars.shape[-1] != self.input_size:\n raise ParameterError(\n f\"Input variables dimension don't conform with predictor {type(self)} \"\n + f\"Input variable dimensions: {input_vars.shape[-1]} != {self.input_size}\"\n )\n\n if hasattr(self, \"output_size\") and output_vars.shape[-1] != self.output_size:\n raise ParameterError(\n f\"Output variable dimensions don't conform with predictor {type(self)} \"\n + f\"Output variable dimensions: {output_vars.shape[-1]} != {self.output_size}\"\n )\n\n if output_vars.shape[0] != input_vars.shape[0]:\n raise ParameterError(\n \"Non-conforming dimension between input variables and output variables: \"\n + f\"{output_vars.shape[0]} != {input_vars.shape[0]}\"\n )\n\n self._input = input_vars\n self._output = output_vars\n\n def _build_predictor_model(self, **kwargs):\n self._mip_model(**kwargs)\n\n def print_stats(self, file=None):\n \"\"\"Print statistics on model additions stored by this class.\n\n This function prints detailed statistics on the variables\n and constraints that were added to the model.\n\n Arguments\n ---------\n\n file: None, optional\n Text stream to which output should be redirected. 
By default, this is sys.stdout.\n \"\"\"\n\n n_indicator_cons = 0\n n_sos_cons = 0\n n_linear_cons = 0\n\n created_cons = self._created_cons\n created_vars = self._created_vars\n if hasattr(self, \"_estimators\"):\n for estimator in self._estimators:\n created_cons += estimator._created_cons\n created_vars += estimator._created_vars\n if hasattr(self, \"_layers\"):\n for layer in self._layers:\n created_cons += layer._created_cons\n created_vars += layer._created_vars\n for cons_set in created_cons:\n it = np.nditer(cons_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(cons_set[it.multi_index], Constraint):\n cons_type = cons_set[it.multi_index].getConshdlrName()\n if cons_type == \"indicator\":\n n_indicator_cons += 1\n elif cons_type == \"SOS1\":\n n_sos_cons += 1\n elif cons_type == \"linear\":\n n_linear_cons += 1\n else:\n raise TypeError(\n f\"Cons {cons_set[it.multi_index]} is of unknown type {cons_type}\"\n )\n\n n_bin_vars = 0\n n_cont_vars = 0\n\n for var_set in created_vars:\n it = np.nditer(var_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(var_set[it.multi_index], Variable):\n var_type = var_set[it.multi_index].vtype()\n if var_type == \"BINARY\":\n n_bin_vars += 1\n elif var_type == \"CONTINUOUS\":\n n_cont_vars += 1\n else:\n raise TypeError(\n f\"Var {var_set[it.multi_index]} is of unknown type {var_type}\"\n )\n\n print(\n f\"Constraints created:\\n Linear {n_linear_cons}\\n Indicator {n_indicator_cons}\\n \"\n f\"SOS1 {n_sos_cons}\\n\"\n f\"Created (internal) variables:\\n Binary {n_bin_vars}\\n Continuous {n_cont_vars}\\n\"\n f\"Input Shape: {self.input.shape}\\nOutput Shape: {self.output.shape}\",\n file=file,\n )\n\n def _create_output_vars(self, input_vars):\n \"\"\"May be defined in derived class to create the output variables of predictor.\"\"\"\n if (not hasattr(self, \"_output\") or self._output is None) and (\n not hasattr(self, \"output_size\") or self.output_size is None\n ):\n raise AttributeError\n\n if not hasattr(self, \"_output\") or self._output is None:\n if hasattr(self, \"classification\"):\n if self.classification:\n vtype = \"B\"\n else:\n vtype = \"C\"\n else:\n vtype = \"C\"\n output_vars = create_vars(\n self.scip_model,\n (input_vars.shape[0], self.output_size),\n vtype,\n lb=None,\n ub=None,\n name_prefix=\"out\",\n )\n return output_vars\n else:\n return self._output\n\n @property\n def _has_solution(self):\n \"\"\"Returns true if we have a solution.\"\"\"\n if self.scip_model.getNSols() > 0:\n return True\n return False\n\n @abstractmethod\n def get_error(self, eps):\n \"\"\"Returns error in SCIP's solution with respect to prediction from input.\n\n Returns\n -------\n error : ndarray of same shape as\n :py:attr:`pyscipopt_ml.modelling.base_predictor_constr.AbstractPredictorConstr.output`\n Assuming that we have a solution for the input and output variables\n `x, y`. Returns the absolute value of the differences between `predictor.predict(x)` and\n `y`. 
Where predictor is the regression / classification model represented by this object.\n\n Raises\n ------\n NoSolution\n If the SCIP model has no solution (either was not optimized or is infeasible).\n \"\"\"\n ...\n\n @abstractmethod\n def _mip_model(self, **kwargs):\n \"\"\"Makes MIP model for the predictor.\"\"\"\n ...\n\n @property\n def input(self):\n \"\"\"Returns the input variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._input\n\n @property\n def output(self):\n \"\"\"Output variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._output\n\n @property\n def input_values(self):\n \"\"\"Returns the values for the input variables if a solution is known.\n\n Returns\n -------\n input_vals : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n input_vals = np.zeros(self.input.shape)\n for i in range(self.input.shape[0]):\n for j in range(self.input.shape[1]):\n input_vals[i][j] = self.scip_model.getVal(self.input[i][j])\n\n return input_vals\n\n @property\n def output_values(self):\n \"\"\"Returns the values for the output variables if a solution is known.\n\n Returns\n -------\n output_value : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n output_vals = np.zeros(self.output.shape)\n for i in range(self.output.shape[0]):\n for j in range(self.output.shape[1]):\n output_vals[i][j] = self.scip_model.getVal(self.output[i][j])\n\n return output_vals\n\n def __str__(self):\n return self._name"
},
{
"identifier": "argmax_bound_formulation",
"path": "src/pyscipopt_ml/modelling/classification/argmax_model.py",
"snippet": "def argmax_bound_formulation(scip_model, _input, output, unique_naming_prefix, one_dim_center=0.5):\n \"\"\"\n Create constraints that represent the output of a gradient boosted tree given that the individual decision\n trees have already been modelled. The constraints ensure binary output of a single class.\n\n The formulation is different depending on the number of classes. In the case of there being two samples:\n\n Let c be the regression input \\reals^{2}, and z the binary output {0, 1}^{2}\n .. math::\n\n \\begin{align*}\n z_{1} : x_{1} >= x_{2}\n z_{2} : x_{2} >= x_{1}\n \\sum z_{i} == 1\n \\end{align*}\n\n for the case of arbitrary classes the formulation below is used:\n\n Let x be the regression input \\reals^{n}, z the binary output\n {0, 1}^{n}, s the slack variables [0, inf]^{n}, and y the maximum over the input \\reals:\n\n .. math::\n\n \\begin{align*}\n x_{i} + s_{i} - y == 0 \\forall i \\in N\n SOS1(z_{i}, s_{i}) \\forall i \\in N\n \\sum z_{i} == 1\n \\end{align*}\n\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n _input : np.ndarray\n The (potentially aggregated) output variables from the regression variant of a predictor, which are now\n input to the argmax formulation.\n output : np.ndarray\n The output variables of the (classification) predictor\n unique_naming_prefix : str, optional\n A unique naming prefix that is used before all variable and constraint names. This parameter is important if\n the SCIP model is later printed to file and many predictors are added to the same SCIP model.\n one_dim_center : float, optional\n The value for which the 1-D argmax is centred around. Normally this is 0.5 for a single binary.\n Returns\n -------\n created_vars : list\n A list containing np.ndarray of PySCIPOpt variables that were created for the argmax formulation\n created_cons : list\n A list containing np.ndarray of PySCIPOpt constraints that we created for the argmax formulation\n \"\"\"\n\n assert (\n _input.shape == output.shape\n ), f\"Input and output dimensions do not match. 
{_input.shape} != {output.shape}\"\n\n # get the in and out dimensions\n n_samples = _input.shape[0]\n outdim = output.shape[-1]\n\n # Separate the formulation into cases\n if outdim == 1:\n name_prefix = unique_naming_prefix + \"argmax\"\n bin_vars = create_vars(scip_model, shape=(n_samples,), vtype=\"B\", name_prefix=name_prefix)\n\n # Create additional constraints\n output_equal_cons = np.zeros((n_samples,), dtype=object)\n output_under_half = np.zeros((n_samples,), dtype=object)\n output_over_half = np.zeros((n_samples,), dtype=object)\n\n # Now populate the constraints\n for i in range(n_samples):\n name = unique_naming_prefix + f\"out_eq_{i}\"\n output_equal_cons[i] = scip_model.addCons(output[i][0] == bin_vars[i], name=name)\n name = unique_naming_prefix + f\"out_ub_{i}\"\n output_under_half[i] = scip_model.addConsIndicator(\n _input[i][0] <= one_dim_center, bin_vars[i], activeone=False, name=name\n )\n name = unique_naming_prefix + f\"out_lb_{i}\"\n output_under_half[i] = scip_model.addConsIndicator(\n -_input[i][0] <= -one_dim_center, bin_vars[i], name=name\n )\n\n return [bin_vars], [output_equal_cons, output_under_half, output_over_half]\n\n elif outdim == 2:\n # Create additional variables\n name_prefix = unique_naming_prefix + \"argmax\"\n max_bin_vars = create_vars(\n scip_model, shape=(n_samples, outdim), vtype=\"B\", name_prefix=name_prefix\n )\n\n # Create additional constraints\n output_equal_cons = np.zeros((n_samples, outdim), dtype=object)\n indicator_cons = np.zeros((n_samples, outdim), dtype=object)\n sum_bin_cons = np.zeros((n_samples,), dtype=object)\n\n # Now populate the constraints\n for i in range(n_samples):\n name = unique_naming_prefix + f\"out_eq_{i}_0\"\n output_equal_cons[i][0] = scip_model.addCons(\n output[i][0] == max_bin_vars[i][0], name=name\n )\n name = unique_naming_prefix + f\"out_eq_{i}_1\"\n output_equal_cons[i][1] = scip_model.addCons(\n output[i][1] == max_bin_vars[i][1], name=name\n )\n name = unique_naming_prefix + f\"indicator_argmax_{i}_0\"\n indicator_cons[i][0] = scip_model.addConsIndicator(\n -_input[i][0] <= -_input[i][1], max_bin_vars[i][0], name=name\n )\n name = unique_naming_prefix + f\"indicator_argmax_{i}_1\"\n indicator_cons[i][1] = scip_model.addConsIndicator(\n -_input[i][1] <= -_input[i][0], max_bin_vars[i][1], name=name\n )\n name = unique_naming_prefix + f\"sum_bin_{i}\"\n sum_bin_cons[i] = scip_model.addCons(\n quicksum(max_bin_vars[i][j] for j in range(outdim)) == 1, name=name\n )\n return [max_bin_vars], [output_equal_cons, indicator_cons, sum_bin_cons]\n else:\n # Create additional variables that are needed for classification\n name_prefix = unique_naming_prefix + \"argmax\"\n max_bin_vars = create_vars(\n scip_model, shape=(n_samples, outdim), vtype=\"B\", name_prefix=name_prefix\n )\n name_prefix = unique_naming_prefix + \"slack_argmax\"\n slack_vars = create_vars(\n scip_model, shape=(n_samples, outdim), vtype=\"C\", lb=0, name_prefix=name_prefix\n )\n name_prefix = unique_naming_prefix + \"max_val\"\n max_val_vars = create_vars(\n scip_model, shape=(n_samples,), vtype=\"C\", lb=None, ub=None, name_prefix=name_prefix\n )\n\n # Create additional constraints that are needed for classification\n output_equal_cons = np.zeros((n_samples, outdim), dtype=object)\n sum_zero_cons = np.zeros((n_samples, outdim), dtype=object)\n sos_slack_bin_cons = np.zeros((n_samples, outdim), dtype=object)\n sum_bin_cons = np.zeros((n_samples,), dtype=object)\n\n for i in range(n_samples):\n for j in range(outdim):\n name = 
unique_naming_prefix + f\"out_eq_{i}_{j}\"\n output_equal_cons[i][j] = scip_model.addCons(\n output[i][j] == max_bin_vars[i][j], name=name\n )\n name = unique_naming_prefix + f\"slack_zero_eq_{i}_{j}\"\n sum_zero_cons[i][j] = scip_model.addCons(\n _input[i][j] + slack_vars[i][j] - max_val_vars[i] == 0, name=name\n )\n name = unique_naming_prefix + f\"sos_slack_bin_{i}_{j}\"\n sos_slack_bin_cons[i][j] = scip_model.addConsSOS1(\n [slack_vars[i][j], max_bin_vars[i][j]], name=name\n )\n\n name = unique_naming_prefix + f\"sum_bin_{i}\"\n sum_bin_cons[i] = scip_model.addCons(\n quicksum(max_bin_vars[i][j] for j in range(outdim)) == 1, name=name\n )\n\n return [max_bin_vars, max_val_vars], [\n output_equal_cons,\n sum_zero_cons,\n sos_slack_bin_cons,\n sum_bin_cons,\n ]"
},
{
"identifier": "leaf_formulation",
"path": "src/pyscipopt_ml/modelling/decision_tree/decision_tree_model.py",
"snippet": "def leaf_formulation(\n scip_model, _input, output, tree, unique_naming_prefix, epsilon, classification=False\n):\n \"\"\"Formulate decision tree using 'leaf' formulation\n\n We have one variable per leaf of the tree and a series of indicator constraints to\n define when that leaf is reached.\n\n The first step of the procedure is to derive input bounds for each leaf of the decision tree. These bounds will\n dictate for which input values the leaf can be reached. For a single sample, let x \\reals^{nI} be the input,\n z {0,1}^{nL} be binary variables representing if a leaf is reached or not, and y \\reals^{nO} be the output.\n\n .. math::\n\n \\begin{align*}\n z_{i} -> x_{j} \\geq leaf_lb[i][j] \\forall i \\in nL, j \\in nI\n z_{i} -> x_{j} \\leq leaf_ub[i][j] \\forall i \\in nL, j \\in nI\n if classification:\n y_{j} == \\sum z_{i} \\forall i \\in nL \\text{, where class j is output of z_i}\n \\sum y_{k} == 1\n else:\n z_{i} -> y_{k} = leaf_value[i][j] \\forall i \\in nL, k \\in nO\n \\sum z_{i} == 1\n \\end{align*}\n\n \"\"\"\n\n # Create names for items we want to access frequently\n n_samples = _input.shape[0]\n n_features = tree[\"n_features\"]\n outdim = output.shape[-1]\n\n # Collect leaf nodes\n leaf_ids = tree[\"children_left\"] <= -1\n n_leafs = sum(leaf_ids)\n name_prefix = unique_naming_prefix + \"leaf\"\n leaf_vars = create_vars(\n scip_model, shape=(n_samples, n_leafs), vtype=\"B\", lb=0, name_prefix=name_prefix\n )\n\n # Calculate bounds for each leaf node\n (node_lb, node_ub) = compute_leafs_bounds(tree, epsilon, scip_model.infinity())\n\n # Create empty constraint objects\n output_class_sum_leaf_cons = np.zeros((n_samples, outdim), dtype=object)\n indicator_output_cons = np.zeros((n_samples, n_leafs, outdim, 2), dtype=object)\n indicator_leaf_lb = np.zeros((n_samples, n_leafs, n_features), dtype=object)\n indicator_leaf_ub = np.zeros((n_samples, n_leafs, n_features), dtype=object)\n\n # Iterate over all leaf nodes (They are the non-zero entries in leaf_ids)\n for i in range(n_samples):\n leafs_per_class = [0 for _ in range(outdim)]\n for j, node in enumerate(leaf_ids.nonzero()[0]):\n fixed_var = False\n # Fix the leaf variable to 0 if the input bounds do not allow the leaf to be reached\n for feature in range(n_features):\n if (\n _input[i][feature].getLbOriginal() > node_ub[feature][node]\n or _input[i][feature].getUbOriginal() < node_lb[feature][node]\n ):\n scip_model.fixVar(leaf_vars[i][j], 0)\n fixed_var = True\n break\n # If the leaf could be reached, then add two sets of indicator constraints.\n # The first will enforce that a leaf node is only selected if the input values result in such a leaf.\n # The second force the appropriate value output by the leaf to be selected\n if not fixed_var:\n for feature in range(n_features):\n name_lb = unique_naming_prefix + f\"indicator_lb_{i}_{j}_{feature}\"\n name_ub = unique_naming_prefix + f\"indicator_ub_{i}_{j}_{feature}\"\n feat_lb = node_lb[feature, node]\n feat_ub = node_ub[feature, node]\n if (\n feat_lb > -scip_model.infinity()\n and _input[i][feature].getLbOriginal() < feat_lb\n ):\n indicator_leaf_lb[i][j][feature] = scip_model.addConsIndicator(\n -_input[i][feature] <= -feat_lb, leaf_vars[i][j], name=name_lb\n )\n if (\n feat_ub < scip_model.infinity()\n and _input[i][feature].getUbOriginal() > feat_ub\n ):\n indicator_leaf_ub[i][j][feature] = scip_model.addConsIndicator(\n _input[i][feature] <= feat_ub, leaf_vars[i][j], name=name_ub\n )\n # Iterate over the final output shape (num_outputs)\n # In the 
case of classification (num_classes), simply force the most frequent class to be selected\n if classification:\n value = int(np.argmax(tree[\"value\"][node][0]))\n if outdim == 1:\n if value == 1:\n leafs_per_class[0] += leaf_vars[i][j]\n else:\n leafs_per_class[value] += leaf_vars[i][j]\n else:\n for k in range(outdim):\n name_ub = unique_naming_prefix + f\"indicator_output_{i}_{j}_{k}_0\"\n name_lb = unique_naming_prefix + f\"indicator_output_{i}_{j}_{k}_1\"\n value = tree[\"value\"][node][k][0]\n indicator_output_cons[i][j][k][0] = scip_model.addConsIndicator(\n output[i][k] <= value, leaf_vars[i][j], name=name_ub\n )\n indicator_output_cons[i][j][k][1] = scip_model.addConsIndicator(\n -output[i][k] <= -value, leaf_vars[i][j], name=name_lb\n )\n # Add constraints that ensure the correct class is selected depending on the leaf\n if classification:\n for j in range(outdim):\n name = f\"class_leaf_{i}_{j}\"\n output_class_sum_leaf_cons[i][j] = scip_model.addCons(\n output[i][j] == leafs_per_class[j], name=name\n )\n\n # Now add the constraints that only one leaf can be selected.\n # In the case of classification there is an additional constraint that only one class can be selected\n leaf_sum_cons = np.zeros(n_samples, dtype=object)\n for i in range(n_samples):\n name = unique_naming_prefix + f\"sum_leafs_{i}\"\n leaf_sum_cons[i] = scip_model.addCons(\n quicksum(leaf_vars[i][j] for j in range(leaf_vars.shape[-1])) == 1, name=name\n )\n\n # Finally set potentially stronger global bounds on the output variables (in the case of regression)\n if not classification:\n max_vals = [np.max(tree[\"value\"][:, j, :]) for j in range(outdim)]\n min_vals = [np.min(tree[\"value\"][:, j, :]) for j in range(outdim)]\n for i in range(n_samples):\n for j in range(outdim):\n if output[i][j].getLbOriginal() < min_vals[j]:\n scip_model.chgVarLb(output[i][j], min_vals[j])\n if output[i][j].getUbOriginal() > max_vals[j]:\n scip_model.chgVarUb(output[i][j], max_vals[j])\n\n # Now return the added constraints and variables\n if classification:\n return [leaf_vars], [\n indicator_leaf_lb,\n indicator_leaf_ub,\n output_class_sum_leaf_cons,\n leaf_sum_cons,\n ]\n else:\n return [leaf_vars], [\n indicator_leaf_lb,\n indicator_leaf_ub,\n indicator_output_cons,\n leaf_sum_cons,\n ]"
},
{
"identifier": "create_vars",
"path": "src/pyscipopt_ml/modelling/var_utils.py",
"snippet": "def create_vars(scip_model, shape, vtype, lb=None, ub=None, name_prefix=\"\"):\n \"\"\"\n Create PySCIPOpt variables in a numpy.ndarray of a given shape.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n shape : tuple\n The shape of the numpy array that will be constructed\n vtype : 'C' | 'B' | 'I'\n Whether the variables will be continuous, binary, or integer\n lb : float or int or None, optional\n The lower bound of the variables\n ub : float or int or None, optional\n The upper bound of the variables\n name_prefix : str, optional\n The naming prefix used for these variables\n\n Returns\n -------\n scip_vars : np.ndarray\n A np.ndarray with shape (shape) that contains uniquely names variables all of which are the specified type\n \"\"\"\n\n scip_vars = np.zeros(shape, dtype=object)\n it = np.nditer(scip_vars, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n idx_list = str(it.multi_index).strip(\")\").strip(\"(\").split(\",\")\n idx_string = \"\"\n for idx in idx_list:\n if idx == \"\":\n continue\n idx_string += f\"_{int(idx)}\"\n name = name_prefix + idx_string\n scip_vars[it.multi_index] = scip_model.addVar(vtype=vtype, lb=lb, ub=ub, name=name)\n return scip_vars"
}
] | import numpy as np
from ...sklearn.decision_tree import (
add_decision_tree_classifier_constr,
add_decision_tree_regressor_constr,
)
from ..base_predictor_constraint import AbstractPredictorConstr
from ..classification.argmax_model import argmax_bound_formulation
from ..decision_tree import leaf_formulation
from ..var_utils import create_vars | 9,971 | The output dimension of each decision tree
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
epsilon : float
The epsilon that is used for each decision tree model. See #TODO: Decision tree modelling path
classification : bool
Whether the individual decision trees (i.e. estimators) are classification trees
Returns
-------
estimators : list
A list of :py:class:`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
"""
estimators = []
for i in range(n_estimators):
for j in range(outdim):
unique_prefix = unique_naming_prefix + f"{i}_{j}"
estimators.append(
TreeEstimator(
scip_model,
trees[i][j],
_input,
tree_vars[:, i, j].reshape((-1, 1)),
unique_prefix,
epsilon,
classification,
**kwargs,
)
)
return estimators
def create_sklearn_tree_estimators(
scip_model,
predictor,
_input,
n_samples,
outdim,
unique_naming_prefix,
classification,
gbdt_or_rf="gbdt",
**kwargs,
):
"""
Create individual estimators for each decision tree for decision tree based ensemble predictors from SKLearn.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
predictor : GradientBoostingClassifier | GradientBoostingRegressor | RandomForestClassifier | RandomForestRegressor
The Sklearn predictor that we are modelling
_input : np.ndarray
The input variables into each decision tree (i.e. estimator)
n_samples : int
The number of samples as input
outdim : int
The number of outputs of each estimator
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
classification : bool
Whether the individual decision trees (i.e. estimators) are classification trees
gbdt_or_rf : "gbdt" | "rf"
Whether the predictor is for gradient boosting decision trees or random forests.
Returns
-------
estimators : list
A list of :py:class:`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
tree_vars : np.ndarray
A np.ndarray of created PySCIPOpt vars
"""
# Create variables to represent the output of each decision tree (i.e. estimator)
shape = (n_samples, predictor.n_estimators, outdim)
tree_vars = create_vars(
scip_model, shape=shape, vtype="C", lb=None, name_prefix=unique_naming_prefix + "tree_var"
)
# Create each estimator. In the case of GBDT, there are (n_estimators, outdim) many estimators, while for RF
# there are (n_estimators,) many estimators. In the case of GBDT for classification each individual DT is a regression tree.
estimators = []
if gbdt_or_rf == "gbdt":
for i in range(predictor.n_estimators_):
for j in range(outdim):
unique_prefix = unique_naming_prefix + f"{i}_{j}"
tree = predictor.estimators_[i][j]
estimators.append(
add_decision_tree_regressor_constr(
scip_model,
tree,
_input,
tree_vars[:, i, j].reshape((-1, 1)),
unique_prefix,
**kwargs,
)
)
elif gbdt_or_rf == "rf":
for i in range(predictor.n_estimators):
tree = predictor.estimators_[i]
unique_prefix = unique_naming_prefix + f"{i}"
if classification:
estimators.append(
add_decision_tree_classifier_constr(
scip_model, tree, _input, tree_vars[:, i, :], unique_prefix, **kwargs
)
)
else:
estimators.append(
add_decision_tree_regressor_constr(
scip_model, tree, _input, tree_vars[:, i, :], unique_prefix, **kwargs
)
)
return estimators, tree_vars
| """ Utilities for modelling gradient boosting decision trees and random forest constraints """
def aggregated_estimator_formulation(
scip_model,
_input,
output,
tree_vars,
trees,
constant,
lr,
n_estimators,
unique_naming_prefix,
epsilon,
aggr,
classification,
**kwargs,
):
"""
Creates the model that represents the aggregation of estimators into a single output.
This function is used exclusively for the case where the estimators are decision trees, and the larger
predictor is either a gradient boosting decision tree or random forest.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
_input : np.ndarray
The input variables that are passed to each decision tree
output : np.ndarray
The output variables of the predictor
tree_vars : np.ndarray
The PySCIPOpt variables that have been created to represent the output of each decision tree (i.e. estimator)
trees : list
A list of lists containing dictionary information that completely describe each decision tree (i.e. estimator)
constant : np.ndarray
An array of constant shift values that are added to the output values of each decision tree (i.e. estimator)
lr : float or int
The learning rate used while training. For GBDT / RF this scales the output of each tree
n_estimators : int
The number of decision trees (i.e. estimators)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
epsilon : float
The epsilon that is used for each decision tree model. See
:py:func:`pyscipopt_ml.modelling.decision_tree.leaf_formulation`.
aggr : str, "sum" or "avg"
The aggregation method used in the formulation. Either the estimators are averages or summed.
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
estimators : list
A list of :py:class:`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Get the number of samples and output dimension
n_samples = _input.shape[0]
outdim = output.shape[-1]
# Create the individual tree estimators
estimators = create_tree_estimators(
scip_model,
_input,
tree_vars,
trees,
n_estimators,
outdim,
unique_naming_prefix,
epsilon,
False,
**kwargs,
)
# Aggregate the trees over the output dimension
aggregate_tree_output = aggregate_estimator_outputs(tree_vars, lr, constant, aggr=aggr)
# Formulate the appropriate constraints
created_vars, created_cons = create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
)
return estimators, created_vars, created_cons
def create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
):
"""
Creates the variables and constraints that link the output of the predictor itself and the aggregation of each
estimator.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
aggregate_tree_output : np.ndarray
The aggregated output variables of each decision tree
output : np.ndarray
The output variables of the predictor
n_samples : int
The number of samples
outdim : int
The number of outputs of each decision tree (i.e. estimator)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Formulate the appropriate constraints
created_cons = []
created_vars = []
if not classification:
sum_tree_cons = np.zeros((n_samples, outdim), dtype=object)
for i in range(n_samples):
for j in range(outdim):
name = unique_naming_prefix + f"tree_sum_{i}_{j}"
sum_tree_cons[i][j] = scip_model.addCons(
output[i][j] == aggregate_tree_output[i][j], name=name
)
created_cons.append(sum_tree_cons)
else:
new_vars, new_cons = argmax_bound_formulation(
scip_model, aggregate_tree_output, output, unique_naming_prefix
)
for added_var in new_vars:
created_vars.append(added_var)
for added_cons in new_cons:
created_cons.append(added_cons)
return created_vars, created_cons
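# Sizing sketch with hypothetical dimensions: for a regression ensemble with n_samples=10 and
# outdim=3, the loop above adds 10 * 3 = 30 equality constraints of the form
# output[i][j] == aggregate_tree_output[i][j] and creates no additional variables; for
# classification, the number of added variables and constraints is instead determined by
# argmax_bound_formulation applied to the aggregated expressions.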
def aggregate_estimator_outputs(_output, lr, constant, aggr="sum"):
"""
Aggregate the output of individual estimators into a single expression for each output dimension.
This function is needed for models with multiple estimators, e.g. gradient boosting decision trees and
random forests.
The output after aggregation can then be used as input for argmax classification.
Parameters
----------
_output : np.ndarray
The output variables from each individual estimator (e.g. decision tree)
lr : float
The learning rate used for training and which is used to scale the output
constant : np.ndarray
The constant term that is added to the aggregation
aggr : "sum" or "avg"
Aggregation type ("sum" or "avg"). "Sum" for gradient boosting decision trees. "avg" for random forests.
Returns
-------
aggregated_output : np.ndarray
The new aggregated output per dimension over all estimators. Traditionally a sum over one dimension.
"""
assert aggr in [
"sum",
"avg",
], f"Aggregation type {aggr} is neither sum or avg. No model exists."
assert (
_output.ndim == 3
), f"Aggregating estimator outputs of invalid dimension. {_output.ndim} != 3"
n_samples = _output.shape[0]
outdim = _output.shape[-1]
n_estimators = _output.shape[1]
aggregated_output = np.zeros((n_samples, outdim), dtype=object)
for i in range(n_samples):
for j in range(outdim):
sum_expr = constant[j]
for k in range(n_estimators):
scale = 1 if aggr == "sum" else n_estimators
sum_expr += lr * _output[i][k][j] / scale
aggregated_output[i][j] = sum_expr
return aggregated_output
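# Worked example with hypothetical values: the aggregation only uses `+` and `*`, so it behaves
# the same on plain floats as on PySCIPOpt expressions. For one sample, three estimators and
# two outputs with _output[0] = [[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], lr = 0.1 and
# constant = [1.0, 2.0], aggr="sum" gives [1.0 + 0.1*(0+2+4), 2.0 + 0.1*(1+3+5)] = [1.6, 2.9],
# while aggr="avg" divides every term by n_estimators=3 and gives [1.2, 2.3].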
def create_tree_estimators(
scip_model,
_input,
tree_vars,
trees,
n_estimators,
outdim,
unique_naming_prefix,
epsilon,
classification,
**kwargs,
):
"""
Creates individual tree estimator models for each decision tree.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
_input : np.ndarray
The input variables that are passed to each decision tree
tree_vars : np.ndarray
The PySCIPOpt variables that have been created to represent the output of each decision tree (i.e. estimator)
trees : list
A list of lists containing dictionary information that completely describe each decision tree (i.e. estimator)
n_estimators : int
The number of decision trees (i.e. estimators)
outdim : int
The output dimension of each decision tree
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
epsilon : float
The epsilon that is used for each decision tree model. See #TODO: Decision tree modelling path
classification : bool
Whether the individual decision trees (i.e. estimators) are classification trees
Returns
-------
estimators : list
A list of :py:class:`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
"""
estimators = []
for i in range(n_estimators):
for j in range(outdim):
unique_prefix = unique_naming_prefix + f"{i}_{j}"
estimators.append(
TreeEstimator(
scip_model,
trees[i][j],
_input,
tree_vars[:, i, j].reshape((-1, 1)),
unique_prefix,
epsilon,
classification,
**kwargs,
)
)
return estimators
def create_sklearn_tree_estimators(
scip_model,
predictor,
_input,
n_samples,
outdim,
unique_naming_prefix,
classification,
gbdt_or_rf="gbdt",
**kwargs,
):
"""
Create individual estimators for each decision tree for decision tree based ensemble predictors from SKLearn.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
predictor : GradientBoostingClassifier | GradientBoostingRegressor | RandomForestClassifier | RandomForestRegressor
The Sklearn predictor that we are modelling
_input : np.ndarray
The input variables into each decision tree (i.e. estimator)
n_samples : int
The number of samples as input
outdim : int
The number of outputs of each estimator
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
classification : bool
Whether the individual decision trees (i.e. estimators) are classification trees
gbdt_or_rf : "gbdt" | "rf"
Whether the predictor is for gradient boosting decision trees or random forests.
Returns
-------
estimators : list
A list of :py:class:`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
tree_vars : np.ndarray
A np.ndarray of created PySCIPOpt vars
"""
# Create variables to represent the output of each decision tree (i.e. estimator)
shape = (n_samples, predictor.n_estimators, outdim)
tree_vars = create_vars(
scip_model, shape=shape, vtype="C", lb=None, name_prefix=unique_naming_prefix + "tree_var"
)
# Create each estimator. In the case of GBDT, there are (n_estimators, outdim) many estimators, while for RF
# there are (n_estimators,) many estimators. In the case of GBDT for classification each individual DT is a regression tree.
estimators = []
if gbdt_or_rf == "gbdt":
for i in range(predictor.n_estimators_):
for j in range(outdim):
unique_prefix = unique_naming_prefix + f"{i}_{j}"
tree = predictor.estimators_[i][j]
estimators.append(
add_decision_tree_regressor_constr(
scip_model,
tree,
_input,
tree_vars[:, i, j].reshape((-1, 1)),
unique_prefix,
**kwargs,
)
)
elif gbdt_or_rf == "rf":
for i in range(predictor.n_estimators):
tree = predictor.estimators_[i]
unique_prefix = unique_naming_prefix + f"{i}"
if classification:
estimators.append(
add_decision_tree_classifier_constr(
scip_model, tree, _input, tree_vars[:, i, :], unique_prefix, **kwargs
)
)
else:
estimators.append(
add_decision_tree_regressor_constr(
scip_model, tree, _input, tree_vars[:, i, :], unique_prefix, **kwargs
)
)
return estimators, tree_vars
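# Shape sketch with hypothetical numbers: for a GradientBoostingRegressor with n_estimators=50,
# outdim=1 and a batch of 10 samples, tree_vars has shape (10, 50, 1) and one regression-tree
# sub-model is built per (estimator, output) pair, i.e. 50 in total; for a
# RandomForestClassifier with n_estimators=50, one sub-model is built per estimator and each
# receives the full slice tree_vars[:, i, :].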
| class TreeEstimator(AbstractPredictorConstr): | 2 | 2023-12-10 20:28:22+00:00 | 12k |
camenduru/MotionDirector-hf | models/unet_3d_condition.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n attentions = []\n temp_attentions = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n \n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames,\n inverse_temp=True\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += 
(hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames,\n inverse_temp=True\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n 
).sample\n\n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(self, hidden_states, temb=None, num_frames=1):\n output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "models/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=True,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(\n self.resnets[0], \n self.temp_convs[0], \n hidden_states, \n temb, \n num_frames\n )\n else:\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n \n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n if self.gradient_checkpointing:\n hidden_states = cross_attn_g_c(\n attn, \n temp_attn, \n resnet, \n temp_conv, \n hidden_states, \n encoder_hidden_states, \n cross_attention_kwargs, \n temb, \n num_frames\n )\n else:\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n \n if num_frames > 1:\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n\n hidden_states = 
resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "models/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n self.gradient_checkpointing = False\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if num_frames > 1:\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "models/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n):\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "models/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n):\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "transformer_g_c",
"path": "models/unet_3d_blocks.py",
"snippet": "def transformer_g_c(transformer, sample, num_frames):\n sample = g_c(custom_checkpoint(transformer, mode='temp'), \n sample, num_frames, use_reentrant=False\n )['sample']\n\n return sample"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.transformer_temporal import TransformerTemporalModel
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
transformer_g_c
)
import torch
import torch.nn as nn
import torch.utils.checkpoint | 7,573 | """
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns a sample-shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 64): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| # Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns a sample-shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 64): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| down_block = get_down_block( | 5 | 2023-12-11 04:51:39+00:00 | 12k |
Yingyue-L/Mamba-LLaVA | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 9,446 | assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
            raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
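    # Dispatches parameter initialization to the scheme registered in MODEL_INIT_REGISTRY
    # under init_config['name'].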
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
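# HF plumbing: MPTPreTrainedModel binds MPTConfig to the PreTrainedModel machinery
# (and marks MPTBlock as non-splittable for FSDP); MPTModel below is the decoder stack.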
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
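    # Lazily builds and caches the additive attention bias (causal/ALiBi) on first call,
    # then layers prefix-LM, sequence-id and padding masks on top as large negative entries.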
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
self._attn_bias_initialized = True
if self.attn_impl == 'flash':
return (self.attn_bias, attention_mask)
if self.attn_bias is not None:
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor)
assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
            raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
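    # Decoder forward pass: validates mask/config combinations, embeds tokens (adding
    # learned positions unless ALiBi is used), runs every MPTBlock with the shared
    # attention bias, and returns a BaseModelOutputWithPast.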
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
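        # embedding_fraction < 1 leaves the forward activations unchanged but scales the
        # gradient reaching the embedding table by that fraction (the detached copy
        # contributes no gradient).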
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
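        # Run the transformer blocks, optionally through torch.utils.checkpoint to trade
        # recompute for activation memory; per-layer K/V caches are collected when
        # use_cache is enabled.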
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name'] | MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config) | 11 | 2023-12-09 09:39:13+00:00 | 12k |
Theia-4869/MoSA | train.py | [
{
"identifier": "get_cfg",
"path": "src/configs/config.py",
"snippet": "def get_cfg():\n \"\"\"\n Get a copy of the default config.\n \"\"\"\n return _C.clone()"
},
{
"identifier": "loader",
"path": "src/data/loader.py",
"snippet": "_TORCH_BASIC_DS = {\n \"cifar100\": CIFAR100Dataset,\n 'aircraft': AircraftDataset,\n \"food101\": Food101Dataset,\n}\n_DATASET_CATALOG = {\n \"CUB\": CUB200Dataset,\n \"OxfordFlowers\": FlowersDataset,\n \"StanfordCars\": CarsDataset,\n \"StanfordDogs\": DogsDataset,\n \"nabirds\": NabirdsDataset,\n}\ndef _construct_loader(cfg, split, batch_size, shuffle, drop_last):\ndef construct_train_loader(cfg):\ndef construct_trainval_loader(cfg):\ndef construct_test_loader(cfg):\ndef construct_val_loader(cfg, batch_size=None):\ndef shuffle(loader, cur_epoch):"
},
{
"identifier": "Evaluator",
"path": "src/engine/evaluator.py",
"snippet": "class Evaluator():\n \"\"\"\n An evaluator with below logics:\n\n 1. find which eval module to use.\n 2. store the eval results, pretty print it in log file as well.\n \"\"\"\n\n def __init__(\n self,\n ) -> None:\n self.results = defaultdict(dict)\n self.iteration = -1\n self.threshold_end = 0.5\n\n def update_iteration(self, iteration: int) -> None:\n \"\"\"update iteration info\"\"\"\n self.iteration = iteration\n\n def update_result(self, metric: str, value: Union[float, dict]) -> None:\n if self.iteration > -1:\n key_name = \"epoch_\" + str(self.iteration)\n else:\n key_name = \"final\"\n if isinstance(value, float):\n self.results[key_name].update({metric: value})\n else:\n if metric in self.results[key_name]:\n self.results[key_name][metric].update(value)\n else:\n self.results[key_name].update({metric: value})\n\n def classify(self, probs, targets, test_data, multilabel=False):\n \"\"\"\n Evaluate classification result.\n Args:\n probs: np.ndarray for num_data x num_class, predicted probabilities\n targets: np.ndarray for multilabel, list of integers for single label\n test_labels: map test image ids to a list of class labels\n \"\"\"\n if not targets:\n raise ValueError(\n \"When evaluating classification, need at least give targets\")\n\n if multilabel:\n self._eval_multilabel(probs, targets, test_data)\n else:\n self._eval_singlelabel(probs, targets, test_data)\n\n def _eval_singlelabel(\n self,\n scores: np.ndarray,\n targets: List[int],\n eval_type: str\n ) -> None:\n \"\"\"\n if number of labels > 2:\n top1 and topk (5 by default) accuracy\n if number of labels == 2:\n top1 and rocauc\n \"\"\"\n acc_dict = singlelabel.compute_acc_auc(scores, targets)\n\n log_results = {\n k: np.around(v * 100, decimals=2) for k, v in acc_dict.items()\n }\n save_results = acc_dict\n\n self.log_and_update(log_results, save_results, eval_type)\n\n def _eval_multilabel(\n self,\n scores: np.ndarray,\n targets: np.ndarray,\n eval_type: str\n ) -> None:\n num_labels = scores.shape[-1]\n targets = multilabel.multihot(targets, num_labels)\n\n log_results = {}\n ap, ar, mAP, mAR = multilabel.compute_map(scores, targets)\n f1_dict = multilabel.get_best_f1_scores(\n targets, scores, self.threshold_end)\n\n log_results[\"mAP\"] = np.around(mAP * 100, decimals=2)\n log_results[\"mAR\"] = np.around(mAR * 100, decimals=2)\n log_results.update({\n k: np.around(v * 100, decimals=2) for k, v in f1_dict.items()})\n save_results = {\n \"ap\": ap, \"ar\": ar, \"mAP\": mAP, \"mAR\": mAR, \"f1\": f1_dict\n }\n self.log_and_update(log_results, save_results, eval_type)\n\n def log_and_update(self, log_results, save_results, eval_type):\n log_str = \"\"\n for k, result in log_results.items():\n if not isinstance(result, np.ndarray):\n log_str += f\"{k}: {result:.2f}\\t\"\n else:\n log_str += f\"{k}: {list(result)}\\t\"\n logger.info(f\"Classification results with {eval_type}: {log_str}\")\n # save everything\n self.update_result(\"classification\", {eval_type: save_results})"
},
{
"identifier": "Trainer",
"path": "src/engine/trainer.py",
"snippet": "class Trainer():\n \"\"\"\n a trainer with below logics:\n\n 1. Build optimizer, scheduler\n 2. Load checkpoints if provided\n 3. Train and eval at each epoch\n \"\"\"\n def __init__(\n self,\n cfg: CfgNode,\n args,\n model: nn.Module,\n evaluator: Evaluator,\n device: torch.device,\n ) -> None:\n self.cfg = cfg\n self.args = args\n self.model = model\n self.device = device\n\n # solver related\n logger.info(\"Setting up the optimizer...\")\n self.optimizer = make_optimizer([self.model], cfg.SOLVER)\n self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER)\n self.cls_criterion = build_loss(self.cfg)\n\n self.checkpointer = Checkpointer(\n self.model,\n save_dir=cfg.OUTPUT_DIR,\n save_to_disk=True\n )\n\n if len(cfg.MODEL.WEIGHT_PATH) > 0:\n # only use this for vtab in-domain experiments\n checkpointables = [key for key in self.checkpointer.checkpointables if key not in [\"head.last_layer.bias\", \"head.last_layer.weight\"]]\n self.checkpointer.load(cfg.MODEL.WEIGHT_PATH, checkpointables)\n logger.info(f\"Model weight loaded from {cfg.MODEL.WEIGHT_PATH}\")\n\n self.evaluator = evaluator\n self.cpu_device = torch.device(\"cpu\")\n\n def forward_one_batch(self, inputs, targets, is_train, merge=False):\n \"\"\"Train a single (full) epoch on the model using the given\n data loader.\n\n Args:\n X: input dict\n targets\n is_train: bool\n Returns:\n loss\n outputs: output logits\n \"\"\"\n # move data to device\n inputs = inputs.to(self.device, non_blocking=True) # (batchsize, 2048)\n targets = targets.to(self.device, non_blocking=True) # (batchsize, )\n\n if self.cfg.DBG:\n logger.info(f\"shape of inputs: {inputs.shape}\")\n logger.info(f\"shape of targets: {targets.shape}\")\n\n # forward\n with torch.set_grad_enabled(is_train):\n if self.cfg.MODEL.ADAPTER.DEEPREG or self.cfg.MODEL.LORA.DEEPREG:\n outputs, hidden_states = self.model(inputs, mid=True)\n outputs_additional = None\n if self.cfg.MODEL.ADAPTER.ADDITIONAL or self.cfg.MODEL.LORA.ADDITIONAL:\n outputs_additional, hidden_states_additional = self.model(inputs, mid=True)\n else:\n outputs = self.model(inputs)\n outputs_additional = None\n if self.cfg.MODEL.ADAPTER.ADDITIONAL or self.cfg.MODEL.LORA.ADDITIONAL:\n outputs_additional = self.model(inputs)\n _, predicted = torch.max(outputs, 1)\n accuracy = (predicted == targets).sum() * 100.0 / targets.size(0)\n if self.cfg.DBG:\n logger.info(\n \"shape of model output: {}, targets: {}\".format(\n outputs.shape, targets.shape))\n\n if self.cls_criterion.is_local() and is_train:\n self.model.eval()\n loss = self.cls_criterion(\n outputs, targets, self.cls_weights,\n self.model, inputs\n )\n elif self.cls_criterion.is_local():\n return torch.tensor(1), outputs\n else:\n loss = self.cls_criterion(\n outputs, targets, self.cls_weights)\n if outputs_additional is not None:\n if self.cfg.MODEL.ADAPTER.ADD_WEIGHT > 0:\n loss_add = self.cls_criterion(outputs_additional, targets, self.cls_weights)\n loss += loss_add * self.cfg.MODEL.ADAPTER.ADD_WEIGHT\n if self.cfg.MODEL.ADAPTER.REG_WEIGHT > 0:\n loss_reg = symmetric_KL_loss(outputs, outputs_additional)\n loss += loss_reg * self.cfg.MODEL.ADAPTER.REG_WEIGHT\n if self.cfg.MODEL.ADAPTER.DEEPREG:\n loss_reg_deep = deepreg_MSE_loss(hidden_states, hidden_states_additional)\n loss += loss_reg_deep * self.cfg.MODEL.ADAPTER.REG_WEIGHT\n\n if loss == float('inf'):\n logger.info(\n \"encountered infinite loss, skip gradient updating for this batch!\"\n )\n return -1, -1\n elif torch.isnan(loss).any():\n logger.info(\n \"encountered nan loss, skip 
gradient updating for this batch!\"\n )\n return -1, -1\n\n # =======backward and optim step only if in training phase... =========\n if is_train:\n self.optimizer.zero_grad()\n loss.backward()\n \n if self.args.sparse_train:\n for k, p in self.model.named_parameters():\n if p.requires_grad and p.grad is not None:\n if hasattr(p, \"mask\"):\n p.grad.data *= p.mask\n elif hasattr(p, \"masks\"):\n idx = random.randint(0, self.cfg.PRUNER.NUM - 1)\n p.grad.data *= p.masks[idx]\n \n self.optimizer.step()\n\n return loss, accuracy, outputs\n\n def get_input(self, data):\n if not isinstance(data[\"image\"], torch.Tensor):\n for k, v in data.items():\n data[k] = torch.from_numpy(v)\n\n inputs = data[\"image\"].float()\n labels = data[\"label\"]\n return inputs, labels\n\n def train_classifier(self, train_loader, val_loader, test_loader):\n \"\"\"\n Train a classifier using epoch\n \"\"\"\n # save the model prompt if required before training\n self.model.eval()\n\n # setup training epoch params\n total_epoch = self.cfg.SOLVER.TOTAL_EPOCH\n total_data = len(train_loader)\n best_epoch = -1\n best_metric = 0\n best_val_acc = 0\n best_test_acc = 0\n log_interval = self.cfg.SOLVER.LOG_EVERY_N\n\n losses = AverageMeter('Loss', ':.4e')\n accuracy = AverageMeter('Accuracy', ':.4e')\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n\n self.cls_weights = train_loader.dataset.get_class_weights(\n self.cfg.DATA.CLASS_WEIGHTS_TYPE)\n # logger.info(f\"class weights: {self.cls_weights}\")\n patience = 0 # if > self.cfg.SOLVER.PATIENCE, stop training\n\n for epoch in range(total_epoch):\n # reset averagemeters to measure per-epoch results\n losses.reset()\n accuracy.reset()\n batch_time.reset()\n data_time.reset()\n\n lr = self.scheduler.get_lr()[0]\n logger.info(\n \"Training {} / {} epoch, with learning rate {}\".format(\n epoch + 1, total_epoch, lr\n )\n )\n\n # Enable training mode\n self.model.train()\n\n end = time.time()\n\n for idx, input_data in enumerate(train_loader):\n if self.cfg.DBG and idx == 20:\n # if debugging, only need to see the first few iterations\n break\n \n X, targets = self.get_input(input_data)\n # logger.info(X.shape)\n # logger.info(targets.shape)\n # measure data loading time\n data_time.update(time.time() - end)\n\n # if self.cfg.MODEL.ADAPTER.MOSA:\n # train_loss, train_acc, _ = self.forward_one_batch(X, targets, True, True)\n train_loss, train_acc, _ = self.forward_one_batch(X, targets, True)\n\n if train_loss == -1:\n # continue\n return None\n\n losses.update(train_loss.item(), X.shape[0])\n accuracy.update(train_acc.item(), X.shape[0])\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # log during one batch\n if (idx + 1) % log_interval == 0:\n seconds_per_batch = batch_time.val\n eta = datetime.timedelta(seconds=int(\n seconds_per_batch * (total_data - idx - 1) + seconds_per_batch*total_data*(total_epoch-epoch-1)))\n logger.info(\n \"\\tTraining {}/{}. train loss: {:.4f},\".format(\n idx + 1,\n total_data,\n train_loss\n )\n + \"\\t{:.4f} s / batch. (data: {:.2e}). 
ETA={}, \".format(\n seconds_per_batch,\n data_time.val,\n str(eta),\n )\n + \"max mem: {:.1f} GB \".format(gpu_mem_usage())\n )\n logger.info(\n \"Epoch {} / {}: \".format(epoch + 1, total_epoch)\n + \"avg data time: {:.2e}, avg batch time: {:.4f}, \".format(\n data_time.avg, batch_time.avg)\n + \"average train loss: {:.4f}\".format(losses.avg))\n if self.args.use_wandb:\n wandb.log({\"train_loss\": losses.avg, \"train_acc\": accuracy.avg, \"learning_rate\": lr}, step=epoch)\n # update lr, scheduler.step() must be called after optimizer.step() according to the docs: https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate # noqa\n self.scheduler.step()\n\n # Enable eval mode\n self.model.eval()\n if self.cfg.MODEL.TRANSFER_TYPE == \"mosa\":\n if self.cfg.MODEL.ADAPTER.MOE:\n self.model.enc.merge(self.cfg.MODEL.ADAPTER.MERGE)\n elif self.cfg.MODEL.TRANSFER_TYPE == \"mosl\":\n if self.cfg.MODEL.LORA.MOE:\n self.model.enc.merge(self.cfg.MODEL.LORA.MERGE)\n\n # eval at each epoch for single gpu training\n self.evaluator.update_iteration(epoch)\n self.eval_classifier(val_loader, \"val\", epoch, epoch == total_epoch - 1)\n if test_loader is not None:\n self.eval_classifier(test_loader, \"test\", epoch, epoch == total_epoch - 1)\n\n # check the patience\n t_name = \"val_\" + val_loader.dataset.name\n try:\n curr_acc = self.evaluator.results[f\"epoch_{epoch}\"][\"classification\"][t_name][\"top1\"]\n curr_val_acc = self.evaluator.results[f\"epoch_{epoch}\"][\"classification\"][t_name][\"top1\"]\n if test_loader is not None:\n curr_test_acc = self.evaluator.results[f\"epoch_{epoch}\"][\"classification\"][t_name.replace(\"val\", \"test\")][\"top1\"]\n except KeyError:\n return\n \n if curr_val_acc > best_val_acc:\n best_val_acc = curr_val_acc\n if test_loader is not None and curr_test_acc > best_test_acc:\n best_test_acc = curr_test_acc\n for name in os.listdir(self.cfg.OUTPUT_DIR):\n dir = os.path.join(self.cfg.OUTPUT_DIR, name)\n if os.path.isdir(dir) and name[:5] == \"test_\":\n shutil.rmtree(dir)\n os.makedirs(os.path.join(self.cfg.OUTPUT_DIR, f'test_{best_test_acc*100:.2f}'))\n if self.args.use_wandb:\n if test_loader is not None:\n wandb.log({\"best_val_acc\": best_val_acc, \"best_test_acc\": best_test_acc}, step=epoch)\n else:\n wandb.log({\"best_val_acc\": best_val_acc}, step=epoch)\n\n if curr_acc > best_metric:\n best_metric = curr_acc\n best_epoch = epoch + 1\n logger.info(\n f'Best epoch {best_epoch}: best metric: {best_metric:.3f}')\n for name in os.listdir(self.cfg.OUTPUT_DIR):\n dir = os.path.join(self.cfg.OUTPUT_DIR, name)\n if os.path.isdir(dir) and name[:4] == \"val_\":\n shutil.rmtree(dir)\n if test_loader is not None:\n os.makedirs(os.path.join(self.cfg.OUTPUT_DIR, f'val_{curr_test_acc*100:.2f}'))\n else:\n os.makedirs(os.path.join(self.cfg.OUTPUT_DIR, f'val_{best_metric*100:.2f}'))\n patience = 0\n else:\n patience += 1\n if patience >= self.cfg.SOLVER.PATIENCE:\n logger.info(\"No improvement. 
Breaking out of loop.\")\n break\n\n # save the last checkpoints\n # if self.cfg.MODEL.SAVE_CKPT:\n # Checkpointer(\n # self.model,\n # save_dir=self.cfg.OUTPUT_DIR,\n # save_to_disk=True\n # ).save(\"last_model\")\n\n @torch.no_grad()\n def eval_classifier(self, data_loader, prefix, epoch, save=False):\n \"\"\"evaluate classifier\"\"\"\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n accuracy = AverageMeter('Accuracy', ':.4e')\n\n log_interval = self.cfg.SOLVER.LOG_EVERY_N\n test_name = prefix + \"_\" + data_loader.dataset.name\n total = len(data_loader)\n\n # initialize features and target\n total_logits = []\n total_targets = []\n\n for idx, input_data in enumerate(data_loader):\n end = time.time()\n X, targets = self.get_input(input_data)\n # measure data loading time\n data_time.update(time.time() - end)\n\n if self.cfg.DBG:\n logger.info(\"during eval: {}\".format(X.shape))\n loss, acc, outputs = self.forward_one_batch(X, targets, False)\n if loss == -1:\n return\n losses.update(loss, X.shape[0])\n accuracy.update(acc, X.shape[0])\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n\n if (idx + 1) % log_interval == 0:\n logger.info(\n \"\\tTest {}/{}. loss: {:.3f}, {:.4f} s / batch. (data: {:.2e})\".format( # noqa\n idx + 1,\n total,\n losses.val,\n batch_time.val,\n data_time.val\n ) + \"max mem: {:.5f} GB \".format(gpu_mem_usage())\n )\n\n # targets: List[int]\n total_targets.extend(list(targets.numpy()))\n total_logits.append(outputs)\n logger.info(\n f\"Inference ({prefix}):\"\n + \"avg data time: {:.2e}, avg batch time: {:.4f}, \".format(\n data_time.avg, batch_time.avg)\n + \"average loss: {:.4f}\".format(losses.avg))\n if self.args.use_wandb:\n wandb.log({\"{}_loss\".format(prefix): losses.avg, \"{}_acc\".format(prefix): accuracy.avg}, step=epoch)\n if self.model.side is not None:\n logger.info(\n \"--> side tuning alpha = {:.4f}\".format(self.model.side_alpha))\n # total_testimages x num_classes\n joint_logits = torch.cat(total_logits, dim=0).cpu().numpy()\n self.evaluator.classify(\n joint_logits, total_targets,\n test_name, self.cfg.DATA.MULTILABEL,\n )\n\n # save the probs and targets\n if save and self.cfg.MODEL.SAVE_CKPT:\n out = {\"targets\": total_targets, \"joint_logits\": joint_logits}\n out_path = os.path.join(\n self.cfg.OUTPUT_DIR, f\"{test_name}_logits.pth\")\n torch.save(out, out_path)\n logger.info(\n f\"Saved logits and targets for {test_name} at {out_path}\")"
},
{
"identifier": "build_model",
"path": "src/models/build_model.py",
"snippet": "def build_model(cfg):\n \"\"\"\n build model here\n \"\"\"\n assert (\n cfg.MODEL.TYPE in _MODEL_TYPES.keys()\n ), \"Model type '{}' not supported\".format(cfg.MODEL.TYPE)\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n\n # Construct the model\n train_type = cfg.MODEL.TYPE\n model = _MODEL_TYPES[train_type](cfg)\n\n log_model_info(model, verbose=cfg.DBG)\n model, device = load_model_to_device(model, cfg)\n logger.info(f\"Device used for model: {device}\")\n\n return model, device"
},
{
"identifier": "build_pruner",
"path": "src/utils/build_pruner.py",
"snippet": "def build_pruner(cfg):\n \"\"\"\n build pruner here\n \"\"\"\n assert (\n cfg.PRUNER.TYPE in _PRUNER_TYPES.keys()\n ), \"Model type '{}' not supported\".format(cfg.PRUNER.TYPE)\n\n # Construct the pruner\n prune_type = cfg.PRUNER.TYPE\n pruner = _PRUNER_TYPES[prune_type](cfg)\n\n return pruner"
},
{
"identifier": "log_pruned_model_info",
"path": "src/utils/build_pruner.py",
"snippet": "def log_pruned_model_info(model, verbose=False):\n \"\"\"Logs pruned model info\"\"\"\n if verbose:\n logger.info(f\"Classification Model:\\n{model}\")\n model_total_params = sum(p.numel() for p in model.parameters())\n model_grad_params = sum(int(p.mask.sum()) if hasattr(p, 'mask') else p.numel() for p in model.parameters() if p.requires_grad)\n logger.info(\"Total Parameters: {0}\\t Gradient Parameters: {1}\".format(\n model_total_params, model_grad_params))\n logger.info(\"tuned percent:%.3f\"%(model_grad_params/model_total_params*100))"
},
{
"identifier": "PathManager",
"path": "src/utils/file_io.py",
"snippet": ""
},
{
"identifier": "default_argument_parser",
"path": "launch.py",
"snippet": "def default_argument_parser():\n \"\"\"\n create a simple parser to wrap around config file\n \"\"\"\n parser = argparse.ArgumentParser(description=\"visual-prompt\")\n parser.add_argument(\n \"--config-file\", default=\"\", metavar=\"FILE\", help=\"path to config file\")\n parser.add_argument(\n \"--train-type\", default=\"\", help=\"training types\")\n parser.add_argument(\n \"--sparse-train\", default=False, action=\"store_true\", help=\"sparse training\")\n parser.add_argument(\n \"--use-wandb\", action=\"store_true\", help=\"use wandb to log\")\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n return parser"
},
{
"identifier": "logging_train_setup",
"path": "launch.py",
"snippet": "def logging_train_setup(args, cfg) -> None:\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n PathManager.mkdirs(output_dir)\n\n logger = logging.setup_logging(\n cfg.NUM_GPUS, get_world_size(), output_dir, name=\"MOSA\")\n\n # Log basic information about environment, cmdline arguments, and config\n rank = get_rank()\n logger.info(\n f\"Rank of current process: {rank}. World size: {get_world_size()}\")\n logger.info(\"Environment info:\\n\" + collect_env_info())\n\n logger.info(\"Command line arguments: \" + str(args))\n if hasattr(args, \"config_file\") and args.config_file != \"\":\n logger.info(\n \"Contents of args.config_file={}:\\n{}\".format(\n args.config_file,\n PathManager.open(args.config_file, \"r\").read()\n )\n )\n # Show the config\n logger.info(\"Training with config:\")\n logger.info(pprint.pformat(cfg))\n # cudnn benchmark has large overhead.\n # It shouldn't be used considering the small size of typical val set.\n if not (hasattr(args, \"eval_only\") and args.eval_only):\n torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK"
}
] | import os
import torch
import warnings
import numpy as np
import random
import wandb
import src.utils.logging as logging
from random import randint
from time import sleep
from src.configs.config import get_cfg
from src.data import loader as data_loader
from src.engine.evaluator import Evaluator
from src.engine.trainer import Trainer
from src.models.build_model import build_model
from src.utils.build_pruner import build_pruner, log_pruned_model_info
from src.utils.file_io import PathManager
from launch import default_argument_parser, logging_train_setup | 8,655 | if p.requires_grad and "head" not in k:
score = pruner.score(p)
mask = pruner.prune(score)
p.mask = mask
elif cfg.MODEL.TRANSFER_TYPE == "lora" or cfg.MODEL.TRANSFER_TYPE == "mosl":
if cfg.MODEL.LORA.MOE:
for blk in model.enc.transformer.encoder.layer:
if cfg.MODEL.LORA.SHARE != "down":
if "q" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_q[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_q[i].weight.mask = m
if "k" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_k[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_k[i].weight.mask = m
if "v" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_v[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_v[i].weight.mask = m
if "o" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_o[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_o[i].weight.mask = m
if cfg.MODEL.LORA.SHARE != "up":
if "q" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_q[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_q[i].weight.mask = m
if "k" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_k[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_k[i].weight.mask = m
if "v" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_v[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_v[i].weight.mask = m
if "o" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_o[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_o[i].weight.mask = m
else:
for k, p in model.named_parameters():
if p.requires_grad and "head" not in k:
score = pruner.score(p)
mask = pruner.prune(score)
p.mask = mask
elif cfg.MODEL.TYPE == "swin":
if cfg.MODEL.ADAPTER.MOE:
for layer in model.enc.layers:
for blk in layer.blocks:
if cfg.MODEL.ADAPTER.SHARE != "down":
score = pruner.score(blk.mlp.adapter_down[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_down[i].weight.mask = m
score = pruner.score(blk.mlp.adapter_down[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_down[i].bias.mask = m
if cfg.MODEL.ADAPTER.SHARE != "up":
score = pruner.score(blk.mlp.adapter_up[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_up[i].weight.mask = m
score = pruner.score(blk.mlp.adapter_up[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_up[i].bias.mask = m
else:
for k, p in model.named_parameters():
if p.requires_grad and "head" not in k:
score = pruner.score(p)
mask = pruner.prune(score)
p.mask = mask
log_pruned_model_info(model, verbose=cfg.DBG)
# for k, p in model.named_parameters():
# if p.requires_grad:
# print(k, p.shape)
# raise ValueError("stop here")
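    # With the masks (optionally) attached, build the evaluator and trainer and run training.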
logger.info("Setting up Evalutator...")
evaluator = Evaluator()
logger.info("Setting up Trainer...")
trainer = Trainer(cfg, args, model, evaluator, cur_device)
if train_loader:
trainer.train_classifier(train_loader, val_loader, test_loader)
else:
print("No train loader presented. Exit")
if cfg.SOLVER.TOTAL_EPOCH == 0:
trainer.eval_classifier(test_loader, "test", 0)
def main(args):
"""main function to call from workflow"""
# set up cfg and args
cfg = setup(args)
# Perform training.
train(cfg, args)
if __name__ == '__main__':
| #!/usr/bin/env python3
"""
major actions here: fine-tune the features and evaluate different settings
"""
warnings.filterwarnings("ignore")
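# setup(): merges the YAML config file and CLI overrides into a CfgNode and allocates a
# fresh run directory (OUTPUT_DIR/<data>/<feature>/lr<..>_bn<..>/run<N>), refusing to run
# more than RUN_N_TIMES times for the same setting.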
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# setup output dir
# output_dir / data_name / feature_name / lr_wd / run1
output_dir = cfg.OUTPUT_DIR
lr = cfg.SOLVER.BASE_LR
wd = cfg.SOLVER.WEIGHT_DECAY
bn = cfg.MODEL.ADAPTER.BOTTLENECK_SIZE
output_folder = os.path.join(
cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_bn{bn}")
# train cfg.RUN_N_TIMES times
count = 1
while count <= cfg.RUN_N_TIMES:
output_path = os.path.join(output_dir, output_folder, f"run{count}")
# pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
# sleep(randint(3, 30))
if not PathManager.exists(output_path):
PathManager.mkdirs(output_path)
cfg.OUTPUT_DIR = output_path
break
else:
count += 1
if count > cfg.RUN_N_TIMES:
raise ValueError(
f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
cfg.freeze()
return cfg
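# get_loaders(): builds train/val/test loaders; VTAB datasets train on the combined
# train+val split, and the test loader is skipped when DATA.NO_TEST is set.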
def get_loaders(cfg, logger):
logger.info("Loading training data (final training data for vtab)...")
if cfg.DATA.NAME.startswith("vtab-"):
train_loader = data_loader.construct_trainval_loader(cfg)
else:
train_loader = data_loader.construct_train_loader(cfg)
logger.info("Loading validation data...")
# not really needed for vtab
val_loader = data_loader.construct_val_loader(cfg)
logger.info("Loading test data...")
if cfg.DATA.NO_TEST:
logger.info("...no test data is constructed")
test_loader = None
else:
test_loader = data_loader.construct_test_loader(cfg)
return train_loader, val_loader, test_loader
def train(cfg, args):
# clear up residual cache from previous runs
if torch.cuda.is_available():
torch.cuda.empty_cache()
# main training / eval actions here
# fix the seed for reproducibility
if cfg.SEED is not None:
torch.manual_seed(cfg.SEED)
torch.cuda.manual_seed(cfg.SEED)
torch.cuda.manual_seed_all(cfg.SEED)
np.random.seed(cfg.SEED)
random.seed(cfg.SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# setup training env including loggers
logging_train_setup(args, cfg)
logger = logging.get_logger("MOSA")
if args.use_wandb:
wandb.init(
project='MOSA',
name='{}_{}_{}'.format(cfg.DATA.NAME, cfg.MODEL.TRANSFER_TYPE, cfg.MODEL.HYPER.HYPER),
config=cfg
)
train_loader, val_loader, test_loader = get_loaders(cfg, logger)
logger.info("Constructing models...")
model, cur_device = build_model(cfg)
if args.sparse_train:
logger.info("Constructing pruner...")
pruner = build_pruner(cfg)
# for k, p in model.named_parameters():
# if p.requires_grad:
# print(k, p.shape)
# raise ValueError("stop here")
if args.sparse_train:
logger.info("Pruning model...")
if cfg.MODEL.TYPE == "vit":
if cfg.MODEL.TRANSFER_TYPE == "adapter" or cfg.MODEL.TRANSFER_TYPE == "mosa":
if cfg.MODEL.ADAPTER.MOE:
for blk in model.enc.transformer.encoder.layer:
if cfg.MODEL.ADAPTER.SHARE != "down":
if cfg.MODEL.ADAPTER.STYLE == "AdaptFormer" or cfg.MODEL.ADAPTER.STYLE == "Pfeiffer":
score = pruner.score(blk.adapter_down[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_down[i].weight.mask = m
score = pruner.score(blk.adapter_down[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_down[i].bias.mask = m
elif cfg.MODEL.ADAPTER.STYLE == "Houlsby":
score = pruner.score(blk.adapter_down_attn[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_down_attn[i].weight.mask = m
score = pruner.score(blk.adapter_down_attn[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_down_attn[i].bias.mask = m
score = pruner.score(blk.adapter_down_ffn[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_down_ffn[i].weight.mask = m
score = pruner.score(blk.adapter_down_ffn[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_down_ffn[i].bias.mask = m
if cfg.MODEL.ADAPTER.SHARE != "up":
if cfg.MODEL.ADAPTER.STYLE == "AdaptFormer" or cfg.MODEL.ADAPTER.STYLE == "Pfeiffer":
score = pruner.score(blk.adapter_up[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_up[i].weight.mask = m
score = pruner.score(blk.adapter_up[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_up[i].bias.mask = m
elif cfg.MODEL.ADAPTER.STYLE == "Houlsby":
score = pruner.score(blk.adapter_up_attn[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_up_attn[i].weight.mask = m
score = pruner.score(blk.adapter_up_attn[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_up_attn[i].bias.mask = m
score = pruner.score(blk.adapter_up_ffn[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_up_ffn[i].weight.mask = m
score = pruner.score(blk.adapter_up_ffn[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.adapter_up_ffn[i].bias.mask = m
else:
for k, p in model.named_parameters():
if p.requires_grad and "head" not in k:
score = pruner.score(p)
mask = pruner.prune(score)
p.mask = mask
elif cfg.MODEL.TRANSFER_TYPE == "lora" or cfg.MODEL.TRANSFER_TYPE == "mosl":
if cfg.MODEL.LORA.MOE:
for blk in model.enc.transformer.encoder.layer:
if cfg.MODEL.LORA.SHARE != "down":
if "q" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_q[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_q[i].weight.mask = m
if "k" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_k[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_k[i].weight.mask = m
if "v" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_v[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_v[i].weight.mask = m
if "o" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_A_o[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_A_o[i].weight.mask = m
if cfg.MODEL.LORA.SHARE != "up":
if "q" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_q[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_q[i].weight.mask = m
if "k" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_k[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_k[i].weight.mask = m
if "v" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_v[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_v[i].weight.mask = m
if "o" in cfg.MODEL.LORA.MODE:
score = pruner.score(blk.attn.lora_B_o[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.attn.lora_B_o[i].weight.mask = m
else:
for k, p in model.named_parameters():
if p.requires_grad and "head" not in k:
score = pruner.score(p)
mask = pruner.prune(score)
p.mask = mask
elif cfg.MODEL.TYPE == "swin":
if cfg.MODEL.ADAPTER.MOE:
for layer in model.enc.layers:
for blk in layer.blocks:
if cfg.MODEL.ADAPTER.SHARE != "down":
score = pruner.score(blk.mlp.adapter_down[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_down[i].weight.mask = m
score = pruner.score(blk.mlp.adapter_down[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_down[i].bias.mask = m
if cfg.MODEL.ADAPTER.SHARE != "up":
score = pruner.score(blk.mlp.adapter_up[0].weight)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_up[i].weight.mask = m
score = pruner.score(blk.mlp.adapter_up[0].bias)
masks = pruner.divide(score)
for i, m in enumerate(masks):
blk.mlp.adapter_up[i].bias.mask = m
else:
for k, p in model.named_parameters():
if p.requires_grad and "head" not in k:
score = pruner.score(p)
mask = pruner.prune(score)
p.mask = mask
log_pruned_model_info(model, verbose=cfg.DBG)
# for k, p in model.named_parameters():
# if p.requires_grad:
# print(k, p.shape)
# raise ValueError("stop here")
logger.info("Setting up Evalutator...")
evaluator = Evaluator()
logger.info("Setting up Trainer...")
trainer = Trainer(cfg, args, model, evaluator, cur_device)
if train_loader:
trainer.train_classifier(train_loader, val_loader, test_loader)
else:
print("No train loader presented. Exit")
if cfg.SOLVER.TOTAL_EPOCH == 0:
trainer.eval_classifier(test_loader, "test", 0)
def main(args):
"""main function to call from workflow"""
# set up cfg and args
cfg = setup(args)
# Perform training.
train(cfg, args)
if __name__ == '__main__': | args = default_argument_parser().parse_args() | 8 | 2023-12-06 07:50:16+00:00 | 12k |
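The pruning loops in the sample above only attach a `.mask` tensor to each trainable parameter; the Trainer that actually consumes those masks is not shown here. Below is a minimal, hedged sketch of how such masks are commonly applied, using a hypothetical `apply_param_masks` helper that zeroes pruned entries after every optimizer step (an assumption, not the repository's implementation):

import torch

@torch.no_grad()
def apply_param_masks(model: torch.nn.Module) -> None:
    # Hypothetical helper: zero out parameter entries whose mask is 0.
    # How the real Trainer uses `p.mask` is not shown in the sample above.
    for p in model.parameters():
        mask = getattr(p, "mask", None)
        if mask is not None:
            p.mul_(mask.to(device=p.device, dtype=p.dtype))

# Illustrative placement in a training step:
#   loss.backward(); optimizer.step(); apply_param_masks(model)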
khwong-c/syn-magia | tests/core/test_signal.py | [
{
"identifier": "Input",
"path": "magia/core.py",
"snippet": "class Input(Signal):\n \"\"\"\n Representing an input signal.\n It has no driver, but it is driving other signals.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Input name is not set\")\n if width == 0:\n raise ValueError(\"Input width is not set\")\n\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.INPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the input signal in the module declaration.\n :return: input logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"input {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None) -> \"Input\":\n \"\"\"\n Copy the input signal. Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new input signal with the same configuration.\n \"\"\"\n return Input(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )"
},
{
"identifier": "Output",
"path": "magia/core.py",
"snippet": "class Output(Signal):\n \"\"\"\n Representing an output signal.\n They are the starting points when we elaborate the module.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Output name is not set\")\n if width == 0:\n raise ValueError(\"Output width is not set\")\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.OUTPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the output signal in the module declaration.\n :return: output logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"output {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None, **kwargs) -> \"Output\":\n \"\"\"\n Copy the output signal. Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new output signal with the same configuration.\n \"\"\"\n return Output(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )"
},
{
"identifier": "Signal",
"path": "magia/core.py",
"snippet": "class Signal(Synthesizable):\n \"\"\"\n The general signal class. It has drivers, which is another signal.\n It can also drive other signals / module instances.\n \"\"\"\n SINGLE_DRIVER_NAME: str = \"d\"\n _SIGNAL_DECL_TEMPLATE = Template(\"logic $signed $width $name;\")\n _SIGNAL_CONNECT_TEMPLATE = Template(\"always_comb\\n $name = $driver;\")\n _SIGNAL_ASSIGN_TEMPLATE = Template(\"assign $name = $driver;\")\n\n _new_signal_counter = count(0)\n\n def __init__(\n self,\n width: int = 0, signed: bool = False,\n name: Optional[str] = None,\n parent_bundle: Optional[\"SignalBundle\"] = None,\n description: Optional[str] = None,\n **kwargs\n ):\n if name is None:\n name = f\"net_{next(self._new_signal_counter)}\"\n\n super().__init__(**kwargs)\n self._config = SignalConfig(\n name=name,\n width=width,\n signed=signed,\n parent_bundle=parent_bundle,\n description=\"\" if description is None else description,\n )\n self._drivers = SignalDict()\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Full name of a signal, used for elaboration.\n \"\"\"\n if self._config.parent_bundle is not None:\n if self._config.parent_bundle.name is not None:\n return f\"bundle_{self._config.parent_bundle.name}_{self.name}\"\n return f\"bundle_{id(self._config.parent_bundle)}_{self.name}\"\n return self.name\n\n @property\n def name(self) -> str:\n \"\"\"\n Short name of the signal, is used to identify the signal in a bundle / SignalDict\n \"\"\"\n return self._config.name\n\n @property\n def description(self) -> str:\n \"\"\"\n Description of the signal\n \"\"\"\n return self._config.description\n\n @property\n def type(self) -> SignalType:\n return self._config.signal_type\n\n @property\n def signed(self) -> bool:\n return self._config.signed\n\n def driver(self, driver_name: str = SINGLE_DRIVER_NAME) -> Optional[\"Signal\"]:\n \"\"\"\n Get the driver of the signal.\n :param driver_name: The name of the driver. 
Default to the single driver.\n :return: The driver signal.\n \"\"\"\n return self._drivers.get(driver_name)\n\n @property\n def drivers(self) -> list[\"Signal\"]:\n \"\"\"\n Get the drivers of the signal.\n :return: The driver signals.\n \"\"\"\n return list(self._drivers.values())\n\n @property\n def owner_instance(self) -> Optional[\"Instance\"]:\n \"\"\"\n Get the module instance that owns this signal.\n It is applicable to input / output signals only.\n \"\"\"\n return self._config.owner_instance\n\n def set_width(self, width: int):\n self._config.width = width\n return self\n\n def set_signed(self, signed: bool):\n self._config.signed = signed\n return self\n\n def set_name(self, name: str):\n self._config.name = name\n return self\n\n def with_signed(self, signed: bool) -> \"Signal\":\n \"\"\"\n Create a new signal with the same configuration, but with a different signedness.\n Connect the original signal to the new signal.\n\n New Signal is not added to the parent bundle.\n\n :return: A new signal with the same configuration.\n \"\"\"\n signal = Signal(\n width=len(self),\n signed=signed,\n parent_bundle=None,\n )\n signal <<= self\n return signal\n\n def with_width(self, width: int) -> \"Signal\":\n \"\"\"\n Create a new signal with the same configuration, but with a different width.\n Connect the original signal to the new signal.\n\n New Signal is not added to the parent bundle.\n\n :return: A new signal with the new configuration.\n \"\"\"\n if width == len(self):\n signal = Signal(\n width=width,\n signed=self.signed,\n parent_bundle=None,\n )\n signal <<= self\n return signal\n if width < len(self):\n return self[width - 1:]\n\n # Perform sign extension / padding according to the signedness of the signal\n padding_size = (width - len(self))\n if self.signed:\n return self[(-1,) * padding_size, :]\n return Constant(0, padding_size) @ self\n\n def signal_decl(self) -> str:\n \"\"\"\n Declare the signal in the module implementation.\n :return: logic (signed) [...]SIGNAL_NAME\n \"\"\"\n if self.net_name is None:\n raise ValueError(\"Signal name is not set\")\n if len(self) == 0:\n raise ValueError(\"Signal width is not set and cannot be inferred\")\n\n return self._SIGNAL_DECL_TEMPLATE.substitute(\n signed=\"signed\" if self.signed else \"\",\n width=f\"[{width - 1}:0]\" if (width := len(self)) > 1 else \"\",\n name=self.net_name,\n )\n\n def elaborate(self) -> str:\n signal_decl = self.signal_decl()\n\n # Ignore assignment signal if it is driven by an output of a module instance\n if self.driver().type != SignalType.OUTPUT:\n assignment = self._SIGNAL_ASSIGN_TEMPLATE.substitute(\n name=self.net_name,\n driver=self.driver().net_name,\n )\n return \"\\n\".join((signal_decl, assignment))\n return signal_decl\n\n def copy(self, parent_bundle: Optional[\"SignalBundle\"] = None, **kwargs) -> \"Signal\":\n \"\"\"\n Copy the signal. 
Driver is discarded.\n Signal can only be copied to a SignalBundle, not an IOBundle.\n :return: A new signal with the same configuration.\n \"\"\"\n return Signal(\n name=self.name,\n width=len(self),\n signed=self.signed,\n parent_bundle=parent_bundle,\n )\n\n def __ilshift__(self, other):\n \"\"\"\n Connect the signal with the driver.\n :param other: Driving Signal\n :return: Original Signal\n \"\"\"\n if isinstance(other, (int, bytes)):\n other = Constant(other, len(self), self.signed)\n if not isinstance(other, Signal):\n raise TypeError(f\"Cannot assign {type(other)} to drive {type(self)}\")\n if self._drivers.get(self.SINGLE_DRIVER_NAME) is not None:\n raise ValueError(f\"Multiple driver on Signal {self.name}.\")\n if self.type == SignalType.OUTPUT and self.owner_instance is not None:\n raise ValueError(\"Cannot drive output of a module instance.\")\n if other.type == SignalType.INPUT and other.owner_instance is not None:\n raise ValueError(\"Input of a module instance cannot drive other signal.\")\n if self.type == SignalType.INPUT and self.owner_instance is None:\n raise ValueError(\"Cannot drive the Input of a module type.\")\n if other.type == SignalType.OUTPUT and other.owner_instance is None:\n raise ValueError(\"Output of a module type cannot drive other signal.\")\n if self.type == SignalType.CONSTANT:\n raise ValueError(\"Constant signal cannot be driven.\")\n\n self._drivers[self.SINGLE_DRIVER_NAME] = other\n if len(self) == 0:\n self.set_width(len(other))\n elif len(other) == 0:\n other.set_width(len(self))\n return self\n\n def __add__(self, other) -> \"Signal\":\n return Operation.create(OPType.ADD, self, other)\n\n def __iadd__(self, other) -> \"Signal\":\n return self.__add__(other)\n\n def __sub__(self, other) -> \"Signal\":\n return Operation.create(OPType.MINUS, self, other)\n\n def __isub__(self, other) -> \"Signal\":\n return self.__sub__(other)\n\n def __neg__(self) -> \"Signal\":\n return Operation.create(\n OPType.MINUS,\n Constant(0, len(self), self.signed),\n self\n )\n\n def __mul__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.MUL, self, other)\n\n def __imul__(self, other) -> \"Signal\":\n return self.__mul__(other)\n\n def __eq__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.EQ, self, other)\n\n def __ne__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.NEQ, self, other)\n\n def __ge__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.GE, self, other)\n\n def __gt__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.GT, self, other)\n\n def __le__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.LE, self, other)\n\n def __lt__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.LT, self, other)\n\n def __and__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.AND, self, other)\n\n def __iand__(self, other) -> \"Signal\":\n return 
self.__and__(other)\n\n def __or__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.OR, self, other)\n\n def __ior__(self, other) -> \"Signal\":\n return self.__or__(other)\n\n def __xor__(self, other) -> \"Signal\":\n if isinstance(other, int):\n other = Constant(other, len(self), self.signed)\n return Operation.create(OPType.XOR, self, other)\n\n def __ixor__(self, other) -> \"Signal\":\n return self.__xor__(other)\n\n def __invert__(self) -> \"Signal\":\n return Operation.create(OPType.NOT, self, None)\n\n def __cmp__(self, other) -> \"Signal\":\n raise NotImplementedError(\"Comparison Operator is not implemented.\")\n\n def __lshift__(self, other) -> \"Signal\":\n if isinstance(other, int):\n op = Operation.create(OPType.LSHIFT, self, other)\n op._op_config.shifting = other\n return op\n raise NotImplementedError(\"Only Constant Shift is not implemented.\")\n\n def __rshift__(self, other) -> \"Signal\":\n if isinstance(other, int):\n op = Operation.create(OPType.RSHIFT, self, other)\n op._op_config.shifting = other\n return op\n raise NotImplementedError(\"Only Constant Shift is not implemented.\")\n\n def __irshift__(self, other) -> \"Signal\":\n raise NotImplementedError(\"`>>=` Operator is not defined.\")\n\n def __getitem__(self, item) -> \"Signal\":\n \"\"\" The Slicing Operator \"\"\"\n # Return the concatenation of the sliced signals\n # If multiple slices are provided.\n if isinstance(item, Iterable):\n sliced = [self[i] for i in item]\n concat = None\n for s in sliced:\n if concat is None:\n concat = s\n else:\n concat @= s\n return concat\n\n if isinstance(item, int):\n item = slice(item, item, None)\n if item is Ellipsis:\n item = slice(None, None, None)\n\n if not isinstance(item, slice):\n raise TypeError(f\"Cannot perform operation on {type(item)}\")\n if item.step is not None:\n raise ValueError(\"Slice step is not implement.\")\n\n return Operation.create(OPType.SLICE, self, item)\n\n def __matmul__(self, other) -> \"Signal\":\n \"\"\"\n Special operation for the `@` operator, which is the concatenation operator.\n \"\"\"\n if isinstance(other, Signal):\n return Operation.create(OPType.CONCAT, self, other)\n raise TypeError(f\"Cannot perform operation on {type(other)}\")\n\n def __imatmul__(self, other) -> \"Signal\":\n return self.__matmul__(other)\n\n def __len__(self):\n return self.width\n\n @property\n def width(self):\n return self._config.width\n\n def reg(\n self,\n clk: Optional[\"Input\"] = None,\n enable: Optional[\"Signal\"] = None,\n reset: Optional[\"Signal\"] = None,\n async_reset: Optional[\"Signal\"] = None,\n reset_value: Optional[Union[bytes, int]] = None,\n async_reset_value: Optional[Union[bytes, int]] = None,\n name: Optional[str] = None,\n ) -> \"Register\":\n \"\"\"\n Create a register from the signal.\n \"\"\"\n register = Register(\n width=len(self),\n enable=enable,\n reset=reset,\n async_reset=async_reset,\n reset_value=reset_value,\n async_reset_value=async_reset_value,\n clk=clk,\n signed=self.signed,\n name=name,\n )\n register <<= self\n return register\n\n def when(\n self,\n condition: \"Signal\",\n else_: Optional[\"Signal\"] = None,\n ) -> \"When\":\n \"\"\"\n Create a `Self if Condition else Else_` statement, similar to the ternary operator in C / Python.\n E.g. 
`gated = data.when(enable)`, `default_2 = data.when(enable, 2)`\n \"\"\"\n if else_ is None:\n else_ = 0\n return When(\n condition=condition,\n if_true=self,\n if_false=else_,\n )\n\n def case(self, cases: dict[int, Union[\"Signal\", int]], default: Optional[Union[\"Signal\", int]] = None, ) -> \"Case\":\n \"\"\"\n Create a `case` statement.\n \"\"\"\n return Case(\n selector=self,\n cases=cases,\n default=default,\n )\n\n def any(self) -> \"Signal\":\n \"\"\"\n Create an `any` statement.\n \"\"\"\n return Operation.create(OPType.ANY, self, None)\n\n def all(self) -> \"Signal\":\n \"\"\"\n Create an `all` statement.\n \"\"\"\n return Operation.create(OPType.ALL, self, None)\n\n def parity(self) -> \"Signal\":\n \"\"\"\n Create an `parity` statement.\n \"\"\"\n return Operation.create(OPType.PARITY, self, None)"
},
{
"identifier": "Elaborator",
"path": "magia/module.py",
"snippet": "class Elaborator:\n \"\"\"\n Elaborator is a helper class to elaborate modules.\n \"\"\"\n name_to_module: dict[str, Module] = {}\n\n def __init__(self):\n raise NotImplementedError(\"Elaborator is a helper class and should not be instantiated.\")\n\n @classmethod\n def to_dict(cls, *modules: Module) -> dict[str, str]:\n \"\"\"\n Elaborate all modules in the list.\n Each module will be elaborated only once and return the SystemVerilog code, plus a list of submodules\n Duplicated submodules will not be elaborated again.\n The elaboration is done recursively, until all submodules are elaborated.\n\n :param modules: The modules to be elaborated.\n :return: A dictionary of the SystemVerilog code for each module.\n \"\"\"\n cls.name_to_module = {}\n modules = list(modules)\n elaborated_modules: dict[str, str] = {}\n while modules:\n mod = modules.pop()\n cls.name_to_module[mod.name] = mod\n if mod.name not in elaborated_modules:\n sv_code, submodules = mod.elaborate()\n elaborated_modules[mod.name] = sv_code\n modules += submodules\n return elaborated_modules\n\n @classmethod\n def to_string(cls, *modules: Module) -> str:\n \"\"\"\n Elaborate all modules in the list and return the SystemVerilog code as a string.\n \"\"\"\n return \"\\n\\n\".join(cls.to_dict(*modules).values())\n\n @classmethod\n def to_file(cls, filename: PathLike, *modules: Module):\n \"\"\"\n Elaborate all modules in the list and write the SystemVerilog code to a file.\n \"\"\"\n sv_code = cls.to_string(*modules)\n Path(filename).write_text(sv_code)\n\n @classmethod\n def to_files(cls, output_dir: PathLike, /, *modules: Module, force: bool = False) -> list[Path]:\n \"\"\"\n Elaborate all modules in the list and write the SystemVerilog code to files.\n The files are written to the output directory.\n\n :param output_dir: The output directory.\n :param modules: The modules to be elaborated.\n :param force: If True, files in the output directory will be overwritten if it exists.\n\n :return: A list of Path objects of the files written.\n \"\"\"\n output_dir = Path(output_dir)\n if not force and output_dir.is_dir() and any(output_dir.iterdir()):\n raise FileExistsError(f\"Directory {output_dir} already exists and not empty.\")\n output_dir.mkdir(parents=True, exist_ok=True)\n\n result = cls.to_dict(*modules)\n result_by_file: dict[str, list[str]] = {}\n\n # Categorize by output file\n for fname, sv_code in result.items():\n module = cls.name_to_module[fname]\n output_file = f\"{fname}.sv\" if module.output_file is None else module.output_file\n\n if output_file not in result_by_file:\n result_by_file[output_file] = []\n result_by_file[output_file].append(sv_code)\n\n # Write to files\n for fname, sv_codes in result_by_file.items():\n sv_code = \"\\n\\n\".join(sv_codes)\n Path(output_dir, fname).write_text(sv_code)\n\n return [Path(output_dir, fname) for fname in result_by_file]\n\n @staticmethod\n def file(fname: PathLike):\n \"\"\"\n Return a class decorator to register the filename of generated code to a Module.\n The effect is employed by the `to_files` method only.\n\n Example:\n @Elaborator.file(\"adder.sv\")\n class Adder(Module):\n ...\n\n Elaborator.to_files(\"/tmp/output\", Adder(name=\"adder1\"), Adder(name=\"adder2\"))\n # Result in /tmp/output/adder.sv with 2 adders.\n \"\"\"\n fname = Path(fname)\n\n def decorator(cls: type[Module]):\n cls.output_file = fname\n return cls\n\n return decorator"
},
{
"identifier": "Module",
"path": "magia/module.py",
"snippet": "class Module(Synthesizable):\n \"\"\"\n A module is a collection of signals and operations. It can also include other modules.\n The module is the base class of specialized modules.\n Developers can define the generic behavior of the module in a dynamic way,\n while each `Module` objects is a specialized module initialized with specific parameters.\n\n The SystemVerilog Keyword `parameters` is not used here.\n It is because we can generate the code for the specialized module with parametrized values hard-coded.\n\n The module can be instantiated with the `instance` method.\n\n Designers shall implement the circuit logic in the `__init__` method.\n However, we highly recommend designers to extract the logic implementation into a seperated method.\n e.g.\n def __init__(self, **kwargs):\n self.io += Input(\"a\", 8)\n self.io += Output(\"q\", 8)\n self.implement()\n\n def implement(self):\n self.io.q <<= self.io.a + 1\n \"\"\"\n _MOD_DECL_TEMPLATE = Template(\"module $name (\\n$io\\n);\")\n _new_module_counter = count(0)\n output_file: Optional[PathLike] = None\n\n def __init__(self, name: Optional[str] = None, **kwargs):\n super().__init__(**kwargs)\n\n # Get the arguments passed to the __init__ method of the inherited class\n # === DON'T REFACTOR BELOW. We are inspecting the stack and refactoring will affect the result ===\n children_local = inspect.stack(0)[1].frame.f_locals\n children_class = children_local.get(\"__class__\")\n func_signature = inspect.signature(children_class.__init__) if children_class else {}\n self._mod_params = OrderedDict(**{\n arg: children_local[arg]\n for arg, param in func_signature.parameters.items()\n if param.kind not in (param.VAR_KEYWORD, param.VAR_POSITIONAL) and arg != \"self\"\n })\n # === DON'T REFACTOR ABOVE ===\n\n if name is None:\n name = f\"{self.__class__.__name__}_{next(self._new_module_counter)}\"\n\n self._config = ModuleConfig(\n module_class=type(self),\n name=name,\n )\n self.io = IOBundle()\n\n def validate(self) -> list[Exception]:\n undriven_outputs = [\n output.net_name\n for output in self.io.outputs\n if output.driver() is None\n ]\n if undriven_outputs:\n return [\n ValueError(\"Output not driven\", output)\n for output in undriven_outputs\n ]\n return []\n\n def mod_declaration(self) -> str:\n mod_decl = self._MOD_DECL_TEMPLATE.substitute(\n name=self.name,\n io=\",\\n\".join(\n port.elaborate()\n for port in self.io.inputs + self.io.outputs\n ),\n )\n return \"\\n\".join((mod_decl, self._module_elab_doc))\n\n def elaborate(self) -> tuple[str, set[\"Module\"]]:\n \"\"\"\n Trace nets and operations from output ports\n This method generates the SystemVerilog code for the module.\n\n :return: The SystemVerilog code for the module, and the list of submodules of the instance in the module.\n \"\"\"\n violations = self.validate()\n if violations:\n raise ValueError(f\"Module {self.name} is not valid.\", violations)\n\n mod_decl = self.mod_declaration()\n\n signals, insts = self.trace()\n\n mod_impl = [\n inst.elaborate()\n for inst in insts\n ]\n mod_impl += [\n signal.elaborate()\n for signal in signals\n ]\n\n mod_impl = \"\\n\".join(mod_impl)\n\n mod_output_assignment = \"\\n\".join(\n Signal._SIGNAL_ASSIGN_TEMPLATE.substitute(\n name=output.net_name,\n driver=output.driver().net_name,\n )\n for output in self.io.outputs\n )\n\n extra_code = self.post_elaborate()\n\n mod_end = \"endmodule\"\n\n sv_code = \"\\n\".join((mod_decl, mod_impl, mod_output_assignment, extra_code, mod_end))\n submodules = {inst.module for inst in 
insts}\n\n return sv_code, submodules\n\n def post_elaborate(self) -> str:\n \"\"\"\n Override this method to add extra code to the module.\n The code will be added after the elaboration of the module.\n\n Adding assertions to the module is a typical use case.\n\n :return: The extra code to be added to the module.\n \"\"\"\n _ = self # Stub to avoid IDE/Lint warning\n return \"\"\n\n def trace(self) -> tuple[list[Union[Signal, Memory]], list[\"Instance\"]]:\n \"\"\"\n Trace nets and instances from output ports\n \"\"\"\n traced_sig_id: set[int] = set()\n traced_inst_id: set[int] = set()\n traced_signal: list[Union[Signal, Memory]] = []\n traced_inst: list[Instance] = []\n sig_to_be_traced: dict[int, Signal] = {}\n\n for output in self.io.outputs:\n sig_to_be_traced |= {\n id(sig): sig\n for sig in output.drivers\n }\n while sig_to_be_traced:\n next_trace = {}\n for signal_id, signal in sig_to_be_traced.items():\n\n # Tracing Instances with Output connected\n if signal.type == SignalType.OUTPUT:\n inst: Optional[Instance] = signal.owner_instance\n if inst is not None and id(inst) not in traced_inst_id:\n traced_inst_id.add(id(inst))\n traced_inst.append(inst)\n\n # The Input port of the instance is skipped\n # We will go directly to the driver as it must be driven by another signal.\n input_drivers = [i.driver() for i in inst.inputs.values()]\n next_trace |= {\n id_sig: sig\n for sig in input_drivers\n if (id_sig := id(sig)) not in traced_sig_id\n }\n elif signal.type != SignalType.INPUT and signal_id not in traced_sig_id:\n traced_sig_id.add(signal_id)\n traced_signal.append(signal)\n\n next_trace |= {\n id_sig: sig\n for sig in signal.drivers\n if sig.type not in (SignalType.INPUT,)\n and (id_sig := id(sig)) not in traced_sig_id\n }\n\n if signal.type == SignalType.MEMORY:\n signal: MemorySignal\n if id(signal.memory) not in traced_sig_id:\n traced_sig_id.add(id(signal.memory))\n traced_signal.append(signal.memory)\n\n next_trace |= {\n id_sig: sig\n for sig in signal.memory.drivers\n if (id_sig := id(sig)) not in traced_sig_id\n }\n\n sig_to_be_traced = next_trace\n\n traced_signal.reverse()\n traced_inst.reverse()\n\n # Check if we have name conflict on the signals and instances\n sig_name_counter = Counter(sig.net_name for sig in traced_signal)\n inst_name_counter = Counter(inst.name for inst in traced_inst)\n sig_conflicts = [name for name, cnt in sig_name_counter.items() if cnt > 1]\n inst_conflicts = [name for name, cnt in inst_name_counter.items() if cnt > 1]\n if sig_conflicts:\n raise ValueError(f\"Signal name conflict: {sig_conflicts}\")\n if inst_conflicts:\n raise ValueError(f\"Instance name conflict: {inst_conflicts}\")\n\n return traced_signal, traced_inst\n\n def instance(\n self, name: Optional[str] = None,\n io: Optional[dict[str, Signal]] = None\n ) -> \"Instance\":\n \"\"\"\n Create an instance of the module\n :return: The created instance\n \"\"\"\n return Instance(\n module=self,\n name=name,\n io=io,\n )\n\n @property\n def name(self) -> str:\n return self._config.name\n\n @property\n def params(self) -> dict[str, object]:\n \"\"\"\n Return the parameters used to specialize this module.\n \"\"\"\n return self._mod_params\n\n @property\n def _module_elab_doc(self) -> str:\n \"\"\"\n Generate the summary of a module and register it to the module.\n It will be written into the SystemVerilog code during elaboration.\n \"\"\"\n doc = self._module_doc_str\n\n if self.params:\n doc += \"\\nModule Parameters:\\n\"\n doc += \"-----------------\\n\"\n doc += \"\\n\".join(\n 
f\"{k}: {v}\"\n for k, v in self.params.items()\n ) + \"\\n\"\n\n if doc:\n doc = f\"/*\\n{doc}*/\\n\"\n return doc\n\n @property\n def _module_doc_str(self) -> str:\n doc = inspect.getdoc(self.__class__)\n if doc is None or doc == inspect.getdoc(Module):\n return \"\"\n if not doc.endswith(\"\\n\"):\n return doc + \"\\n\"\n return doc\n\n @cached_property\n def _module_init_param_doc(self) -> dict[str, str]:\n params = [(k, f\"{k}:\") for k in self._mod_params]\n doc = inspect.getdoc(self.__init__)\n if doc is None:\n return []\n\n result_doc = {}\n possible_param = [line.strip() for line in doc.split(\"\\n\") if \":\" in line]\n for line in possible_param:\n for param, sep in params:\n if sep in line:\n result_doc[param] = line.split(sep, 1)[-1].strip()\n return result_doc\n\n @property\n def spec(self) -> dict[str, object]:\n \"\"\"\n Return the \"Specification\" of a specialized Module.\n It is a dictionary which can be further processed.\n \"\"\"\n return {\n \"name\": self.name,\n \"description\": self._module_doc_str.strip(),\n \"parameters\": [\n {\n \"name\": k,\n \"value\": v,\n \"description\": self._module_init_param_doc.get(k, \"\"),\n }\n for k, v in self.params.items()\n ],\n \"ports\": [\n {\n \"name\": alias,\n \"direction\": signal.type.name,\n \"width\": len(signal),\n \"signed\": signal.signed,\n \"description\": signal.description,\n }\n for alias, signal in self.io.signals.items()\n ],\n }"
}
] | import random
import cocotb
import cocotb.clock
import tests.helper as helper
from pathlib import Path
from cocotb_test.simulator import run as sim_run
from magia import Elaborator, Input, Module, Output, Signal | 8,332 |
@cocotb.test()
async def adder_test(dut):
for _ in range(50):
a = random.randint(0, 0xF)
b = random.randint(0, 0xF)
dut.a.value = a
dut.b.value = b
await cocotb.clock.Timer(1, units="ns")
assert dut.q.value == (a + b)
class TestSignalManipulate:
TOP = "TopLevel"
def test_naming(self):
"""
Specifying a name for a signal should be reflected in the code generated
"""
|
@cocotb.test()
async def adder_test(dut):
for _ in range(50):
a = random.randint(0, 0xF)
b = random.randint(0, 0xF)
dut.a.value = a
dut.b.value = b
await cocotb.clock.Timer(1, units="ns")
assert dut.q.value == (a + b)
class TestSignalManipulate:
TOP = "TopLevel"
def test_naming(self):
"""
Specifying a name for a signal should be reflected in the code generated
"""
| class Top(Module): | 4 | 2023-12-12 22:50:43+00:00 | 12k |
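The context snippets for the sample above expose the magia `Module`/`Signal` API (`self.io += Input(...)`, the `<<=` driver assignment, `Elaborator.to_string`), while the test's actual `Top` module is truncated. The following is only a sketch of a module built with that API; the class name and port widths are illustrative, not the repository's real test fixture:

from magia import Elaborator, Input, Module, Output

class ExampleAdder(Module):  # hypothetical name; not the test's real Top
    def __init__(self, width: int = 4, **kwargs):
        super().__init__(**kwargs)
        self.io += Input("a", width)
        self.io += Input("b", width)
        self.io += Output("q", width)
        self.io.q <<= self.io.a + self.io.b

sv_code = Elaborator.to_string(ExampleAdder(name="TopLevel"))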
batmanlab/DrasCLR | train.py | [
{
"identifier": "Encoder",
"path": "models/cnn3d.py",
"snippet": "class Encoder(nn.Module):\n\n def __init__(self, rep_dim, moco_dim, num_experts, num_coordinates):\n super(Encoder, self).__init__()\n self.rep_dim = rep_dim\n self.moco_dim = moco_dim\n self.num_experts = num_experts\n self.num_coordinates = num_coordinates\n self.conv1 = Conv3d(1, 8, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn1 = nn.BatchNorm3d(8)\n self.act = nn.ELU()\n self.conv2 = Conv3d(8, 8, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn2 = nn.BatchNorm3d(8)\n self.downsample1 = Block(8, 16, self.num_experts, self.num_coordinates)\n self.downsample2 = Block(16, 32, self.num_experts, self.num_coordinates)\n self.downsample3 = Block(32, 64, self.num_experts, self.num_coordinates)\n self.conv3 = Conv3d(64, 128, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn3 = nn.BatchNorm3d(128)\n self.conv4 = Conv3d(128, rep_dim, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn4 = nn.BatchNorm3d(rep_dim)\n self.fc = nn.Linear(rep_dim, moco_dim)\n\n def forward(self, x, loc):\n x = self.conv1(x, loc)\n x = self.bn1(x)\n x = self.act(x)\n x = self.conv2(x, loc)\n x = self.bn2(x)\n x = self.act(x)\n x = self.downsample1(x, loc)\n x = self.downsample2(x, loc)\n x = self.downsample3(x, loc)\n x = self.conv3(x, loc)\n x = self.bn3(x)\n x = self.act(x)\n x = self.conv4(x, loc)\n x = self.bn4(x)\n x = self.act(x)\n h = torch.flatten(x, 1)\n z = self.fc(h)\n return z, h"
},
{
"identifier": "DrasCLR",
"path": "models/builder.py",
"snippet": "class DrasCLR(nn.Module):\n\n def __init__(self, base_encoder, num_patch, rep_dim, moco_dim, num_experts, num_coordinates, K, m, T, mlp):\n \"\"\"\n dim: feature dimension (default: 128)\n K: queue size; number of negative keys (default: 65536)\n m: moco momentum of updating key encoder (default: 0.999)\n T: softmax temperature (default: 0.07)\n \"\"\"\n super(DrasCLR, self).__init__()\n\n self.K = K\n self.m = m\n self.T = T\n self.num_locs = num_patch # add the new dimension of number of locations\n\n # create the encoders\n # num_classes is the output fc dimension\n self.encoder_q = base_encoder(rep_dim=rep_dim, moco_dim=moco_dim, num_experts=num_experts, num_coordinates=num_coordinates)\n self.encoder_k = base_encoder(rep_dim=rep_dim, moco_dim=moco_dim, num_experts=num_experts, num_coordinates=num_coordinates)\n\n if mlp: # hack: brute-force replacement\n dim_mlp = self.encoder_q.fc.weight.shape[1]\n self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)\n self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)\n\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data.copy_(param_q.data) # initialize\n param_k.requires_grad = False # not update by gradient\n\n # create the queue\n self.register_buffer(\"queue\", torch.randn(moco_dim, K, self.num_locs)) # the queue should be the size of (dim of reps) * (number of negative pairs) * (number of total locations)\n self.queue = nn.functional.normalize(self.queue, dim=0) # normalize patch representation\n self.register_buffer(\"queue_ptr\", torch.zeros(self.num_locs, dtype=torch.long)) # set pointer in buffer to 1 for each path location\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, keys, patch_idx):\n # gather keys before updating queue\n keys = concat_all_gather(keys)\n\n batch_size = keys.shape[0]\n\n ptr = self.queue_ptr\n assert self.K % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n self.queue[:, ptr[patch_idx]:ptr[patch_idx] + batch_size, patch_idx] = keys.T\n ptr[patch_idx] = (ptr[patch_idx] + batch_size) % self.K # move pointer\n\n self.queue_ptr = ptr\n\n @torch.no_grad()\n def _batch_shuffle_ddp(self, x):\n \"\"\"\n Batch shuffle, for making use of BatchNorm.\n *** Only support DistributedDataParallel (DDP) model. ***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # random shuffle index\n idx_shuffle = torch.randperm(batch_size_all).cuda()\n\n # broadcast to all gpus\n torch.distributed.broadcast(idx_shuffle, src=0)\n\n # index for restoring\n idx_unshuffle = torch.argsort(idx_shuffle)\n\n # shuffled index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this], idx_unshuffle\n\n @torch.no_grad()\n def _batch_unshuffle_ddp(self, x, idx_unshuffle):\n \"\"\"\n Undo batch shuffle.\n *** Only support DistributedDataParallel (DDP) model. 
***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # restored index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this]\n\n def forward(self, patch_idx, pch_q, pch_k, ngb_q):\n \"\"\"\n Input:\n im_q: a batch of query images\n im_k: a batch of key images\n Output:\n logits, targets\n \"\"\"\n # compute query patch features\n q, h_q = self.encoder_q(pch_q[0], pch_q[1]) # queries: NxC, encoder needs to take both pathces and their locations as inputs\n q = nn.functional.normalize(q, dim=1)\n\n # compute query neighbor features\n ngb_flatten = ngb_q[0].reshape(-1, 32, 32, 32)\n loc_flatten = ngb_q[1].reshape(-1, 3)\n r, h_r = self.encoder_q(ngb_flatten[:, None, :, :, :], loc_flatten)\n r = nn.functional.normalize(r, dim=1)\n r = r.reshape(ngb_q[0].shape[0], ngb_q[0].shape[1], -1) # queries: N * R * C, samples * k-neighbors * channels\n\n # compute key features\n with torch.no_grad(): # no gradient to keys\n self._momentum_update_key_encoder() # update the key encoder\n\n # shuffle for making use of BN\n pch_k[0], idx_unshuffle = self._batch_shuffle_ddp(pch_k[0])\n\n k, h_k = self.encoder_k(pch_k[0], pch_k[1]) # keys: N * C\n k = nn.functional.normalize(k, dim=1)\n\n # undo shuffle\n k = self._batch_unshuffle_ddp(k, idx_unshuffle)\n\n # patch InfoNCE logits\n # Einstein sum is more intuitive\n # positive logits: N * 1\n l_pos_pch = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)\n # negative logits: N * K\n negs = self.queue[:,:,patch_idx].clone().detach() # compute negative logits for each path in the batch conditioned on their locations\n l_neg_pch = torch.einsum('nc,ck->nk', [q, negs])\n # logits: N * (1+K)\n logits_pch = torch.cat([l_pos_pch, l_neg_pch], dim=1)\n # apply temperature\n logits_pch /= self.T\n\n # neighbor InfoNCE logits\n # positive logits: N * 1\n l_pos_ngb = torch.einsum('nrc, nc->n', [r, k]).unsqueeze(-1)\n # negative logits: N * K\n l_neg_ngb = torch.einsum('nrc, ck->nk', [r, negs])\n # logits: N * (1+K)\n logits_ngb = torch.cat([l_pos_ngb, l_neg_ngb], dim=1)\n # apply temperature\n logits_ngb /= self.T\n\n # labels: positive key indicators\n labels = torch.zeros(logits_pch.shape[0], dtype=torch.long).cuda()\n\n # dequeue and enqueue\n self._dequeue_and_enqueue(k, patch_idx) # consider location for each patch in the batch\n\n return logits_pch, logits_ngb, labels"
},
{
"identifier": "COPD_dataset",
"path": "data/copd_patch.py",
"snippet": "class COPD_dataset(Dataset):\n\n def __init__(self, stage, args, patch_transforms=default_transform, neighbor_transforms=default_transform):\n self.stage = stage\n self.args = args\n self.root_dir = args.root_dir\n self.metric_dict = dict() # initialize metric dictionary\n self.patch_transforms = patch_transforms\n self.neighbor_transforms = neighbor_transforms\n\n # atlas patch locations, our refernce file can be found at ./preprocess/misc/atlas_patch_loc.npy\n self.patch_loc = np.load(self.args.root_dir + \"19676E_INSP_STD_JHU_COPD_BSpline_Iso1_patch_loc.npy\")\n # pairwise distance\n self.dists = pairwise_distances(self.patch_loc, metric='euclidean')\n # normalize patch locations\n self.patch_loc = (self.patch_loc / self.patch_loc.max(0)) * 2 - 1 # normalize position to [-1, 1]\n\n self.patch_idx = 0\n self.patch_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.patch_idx)+\".npy\")\n # top k nearest patches\n self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]\n neighbor_lst = []\n for k in range(self.args.k_neighbors):\n neighbor_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.k_neighbor_idx[k])+\".npy\")\n neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32\n self.neighbor_data = np.concatenate(neighbor_lst, axis=0)\n del neighbor_lst\n\n if stage == 'training':\n # Specific to COPDGene dataset, you can change depends on your needs\n FILE = open(DATA_DIR + \"phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.label_name]\n race_idx = mylist.index(\"race\")\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n tmp = [mylist[idx] for idx in metric_idx]\n if \"\" in tmp:\n continue\n if self.args.nhw_only and mylist[race_idx] != \"1\":\n continue\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]] = metric_list\n FILE.close()\n\n if stage == 'testing':\n # Specific to COPDGene dataset, you can change depends on your needs\n self.label_name = self.args.label_name + self.args.label_name_set2\n FILE = open(DATA_DIR + \"phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.label_name]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n tmp = [mylist[idx] for idx in metric_idx]\n if \"\" in tmp[:3]:\n continue\n metric_list = []\n for i in range(len(metric_idx)):\n if tmp[i] == \"\":\n metric_list.append(-1024)\n else:\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]] = metric_list + [-1024, -1024, -1024]\n FILE = open(DATA_DIR + \"CT_scan_datasets/CT_visual_scoring/COPDGene_CT_Visual_20JUL17.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.visual_score]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n if mylist[0] not in self.metric_dict:\n continue\n tmp = [mylist[idx] for idx in metric_idx]\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]][\n -len(self.args.visual_score) - len(self.args.P2_Pheno):-len(self.args.P2_Pheno)] = metric_list\n FILE.close()\n FILE = open(\n DATA_DIR + 'P1-P2 
First 5K Long Data/Subject-flattened- one row per subject/First5000_P1P2_Pheno_Flat24sep16.txt',\n 'r')\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.P2_Pheno]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n if mylist[0] not in self.metric_dict:\n continue\n tmp = [mylist[idx] for idx in metric_idx]\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]][-len(self.args.P2_Pheno):] = metric_list\n FILE.close()\n\n self.sid_list = []\n for item in glob.glob(self.args.root_dir+\"patch/\"+\"*_patch.npy\"):\n if item.split('/')[-1][:6] not in self.metric_dict:\n continue\n self.sid_list.append(item.split('/')[-1][:-10])\n self.sid_list.sort()\n assert len(self.sid_list) == self.patch_data.shape[0]\n\n print(\"Fold: full\")\n self.sid_list = np.asarray(self.sid_list)\n self.sid_list_len = len(self.sid_list)\n print(stage+\" dataset size:\", self.sid_list_len)\n\n def set_patch_idx(self, patch_idx):\n self.patch_idx = patch_idx\n self.patch_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.patch_idx)+\".npy\")\n # top k nearest patches\n self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]\n neighbor_lst = []\n for k in range(self.args.k_neighbors):\n neighbor_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.k_neighbor_idx[k])+\".npy\")\n neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32\n self.neighbor_data = np.concatenate(neighbor_lst, axis=0)\n del neighbor_lst\n\n def __len__(self):\n if self.stage == 'training':\n return self.sid_list_len * self.args.num_patch\n if self.stage == 'testing':\n return self.sid_list_len\n\n def __getitem__(self, idx):\n\n if self.stage == 'training':\n idx = idx % self.sid_list_len\n\n # patch data\n pch = self.patch_data[idx, :, :, :]\n pch = np.clip(pch, -1024, 240) # clip input intensity to [-1024, 240]\n pch = pch + 1024.\n pch = self.patch_transforms(pch[None, :, :, :])\n pch[0] = pch[0]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n pch[1] = pch[1]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n # patch location\n patch_loc_idx = self.patch_loc[self.patch_idx, :]\n\n # neighbor data\n ngb = self.neighbor_data[:, idx, :, :, :]\n ngb = np.clip(ngb, -1024, 240) # clip input intensity to [-1024, 240]\n ngb = ngb + 1024.\n ngb = self.neighbor_transforms(ngb)\n ngb = ngb/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n # neighbor location\n neighor_loc_idx = self.patch_loc[self.k_neighbor_idx, :]\n\n # labels\n key = self.sid_list[idx][:6]\n label = np.asarray(self.metric_dict[key])\n return key, pch, patch_loc_idx, ngb, neighor_loc_idx, label\n\n if self.stage == 'testing':\n sid = self.sid_list[idx]\n\n # read the entire image including 581 patches\n img = np.load(self.root_dir + \"patch/\" + sid + \"_patch.npy\")\n img = np.clip(img, -1024, 240) # clip input intensity to [-1024, 240]\n img = img + 1024.\n img = img[:, None, :, :, :] / 632. - 1 # Normalize to [-1,1], 632=(1024+240)/2\n\n # patch locations for all 581 patches\n patch_loc_idx = self.patch_loc\n\n # study id\n key = self.sid_list[idx][:6]\n\n # labels\n label = np.asarray(self.metric_dict[key]) # extract sid from the first 6 letters\n\n return sid, img, patch_loc_idx, label"
}
] | import os
import argparse
import builtins
import math
import random
import shutil
import time
import warnings
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models.loader as DrasCLR_Loader
from tensorboard_logger import configure, log_value
from models.cnn3d import Encoder
from models.builder import DrasCLR
from data.copd_patch import COPD_dataset
from monai.transforms import Compose, RandGaussianNoise, RandAffine, Rand3DElastic, RandAdjustContrast | 7,395 | # define and create the experiment directory
exp_dir = os.path.join('./ssl_exp', args.exp_name)
if not os.path.isdir(exp_dir):
os.makedirs(exp_dir, exist_ok=True)
# save configurations to a dictionary
with open(os.path.join(exp_dir, 'configs.json'), 'w') as f:
json.dump(vars(args), f, indent=2)
f.close()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
print("Distributed:", args.distributed)
#ngpus_per_node = torch.cuda.device_count()
ngpus_per_node = args.npgus_per_node
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
configure(os.path.join('./ssl_exp', args.exp_name))
# create patch-level encoder
model = DrasCLR(
Encoder,
args.num_patch, args.rep_dim, args.moco_dim, args.num_experts, \
args.num_coordinates, args.moco_k, args.moco_m, args.moco_t, args.mlp)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.gpu])
else:
raise NotImplementedError("GPU number is unknown.")
else:
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
checkpoint = os.path.join('./ssl_exp', args.exp_name, args.resume)
if os.path.isfile(checkpoint):
print("=> loading checkpoint '{}'".format(checkpoint))
if args.gpu is None:
checkpoint = torch.load(checkpoint)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(checkpoint, map_location=loc)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(checkpoint))
exit()
# define augmentation
train_transform = define_augmentation(args, use_cuda=False)
|
parser = argparse.ArgumentParser(description='3D CT Images Self-Supervised Training Patch-level')
parser.add_argument('--arch', metavar='ARCH', default='custom')
parser.add_argument('--workers', default=0, type=int, metavar='N',
help='patch-level number of data loading workers (default: 0)')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch-size', default=64, type=int,
metavar='N',
help='patch-level mini-batch size (default: 64), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
parser.add_argument('--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest patch-level checkpoint (default: None)')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10000', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=0, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_false',
help='use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--npgus-per-node', default=2, type=int,
help='number of gpus per node.')
# image data configs:
parser.add_argument('--stage', default='training', type=str,
help='stage: training or testing')
parser.add_argument('--num-patch', default=581, type=int,
help='total number of patches in the atlas image.')
parser.add_argument('--root-dir', default='/ocean/projects/asc170022p/lisun/copd/gnn_shared/data/patch_data_32_6_reg_mask/',
help='root directory of registered images in COPD dataset')
parser.add_argument('--label-name', default=["FEV1pp_utah", "FEV1_FVC_utah", "finalGold"], nargs='+',
help='phenotype label names')
parser.add_argument('--label-name-set2', default=["Exacerbation_Frequency", "MMRCDyspneaScor"], nargs='+',
help='phenotype label names')
parser.add_argument('--visual-score', default=["Emph_Severity", "Emph_Paraseptal"], nargs='+',
help='phenotype label names')
parser.add_argument('--P2-Pheno', default=["Exacerbation_Frequency_P2"], nargs='+',
help='phenotype label names')
parser.add_argument('--nhw-only', action='store_true',
                    help='only include non-Hispanic white (NHW) subjects')
parser.add_argument('--fold', default=0, type=int,
help='fold index of cross validation')
# MoCo specific configs:
parser.add_argument('--rep-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=4096, type=int,
                    help='queue size; number of negative keys (default: 4096)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.2, type=float,
help='softmax temperature (default: 0.2)')
# options for moco v2
parser.add_argument('--mlp', action='store_false',
help='use mlp head')
parser.add_argument('--cos', action='store_false',
help='use cosine lr schedule')
# experiment configs
parser.add_argument('--adj-thres', default=0.18, type=float,
help='patch adjacent threshold (default: 0.18)')
parser.add_argument('--k-neighbors', default=2, type=int,
help='top k nearest neighbors of the anchor patch in the atlas image.')
parser.add_argument('--beta', default=1.0, type=float,
help='scaling factor of neighbor InfoNCE loss. (default: 1.0)')
parser.add_argument('--warm-up', default=0, type=int,
help='number of warm-up epochs before training neighbor contrastive loss.')
parser.add_argument('--num-experts', default=8, type=int,
help='number of experts in CondConv layer.')
parser.add_argument('--num-coordinates', default=1, type=int,
help='number of input coordinates.')
parser.add_argument('--augmentation', default='agc',
                    help='initials of augmentation including: (f)lip, (a)ffine, (e)lastic, (g)aussian, (c)ontrast.')
parser.add_argument('--exp-name', default='debug_patch', type=str,
help='experiment name')
def main():
# read configurations
args = parser.parse_args()
# define and create the experiment directory
exp_dir = os.path.join('./ssl_exp', args.exp_name)
if not os.path.isdir(exp_dir):
os.makedirs(exp_dir, exist_ok=True)
# save configurations to a dictionary
with open(os.path.join(exp_dir, 'configs.json'), 'w') as f:
json.dump(vars(args), f, indent=2)
f.close()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
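        # Note: cudnn.benchmark=True lets cuDNN auto-tune convolution kernels for speed,
        # at the cost of non-deterministic kernel selection, so the seeding above gives
        # only partial reproducibility.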
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
print("Distributed:", args.distributed)
#ngpus_per_node = torch.cuda.device_count()
ngpus_per_node = args.npgus_per_node
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
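# Note on the launch above: torch.multiprocessing.spawn starts ngpus_per_node worker
# processes and calls main_worker(i, ngpus_per_node, args) with the process index i as
# the first argument, which is why main_worker below takes `gpu` as its first parameter.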
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
configure(os.path.join('./ssl_exp', args.exp_name))
# create patch-level encoder
model = DrasCLR(
Encoder,
args.num_patch, args.rep_dim, args.moco_dim, args.num_experts, \
args.num_coordinates, args.moco_k, args.moco_m, args.moco_t, args.mlp)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.gpu])
else:
raise NotImplementedError("GPU number is unknown.")
else:
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
checkpoint = os.path.join('./ssl_exp', args.exp_name, args.resume)
if os.path.isfile(checkpoint):
print("=> loading checkpoint '{}'".format(checkpoint))
if args.gpu is None:
checkpoint = torch.load(checkpoint)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(checkpoint, map_location=loc)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(checkpoint))
exit()
# define augmentation
train_transform = define_augmentation(args, use_cuda=False)
| train_dataset = COPD_dataset('training', args, DrasCLR_Loader.TwoCropsTransform(train_transform), train_transform) | 2 | 2023-12-09 02:33:53+00:00 | 12k |
CHDers/Traffic-Flow-Prediction-with-Graph-Neural-Networks | traffic_prediction.py | [
{
"identifier": "LoadData",
"path": "traffic_dataset.py",
"snippet": "class LoadData(Dataset): # 这个就是把读入的数据处理成模型需要的训练数据和测试数据,一个一个样本能读取出来\n def __init__(self, data_path, num_nodes, divide_days, time_interval, history_length, train_mode):\n \"\"\"\n :param data_path: list, [\"graph file name\" , \"flow data file name\"], path to save the data file names.\n :param num_nodes: int, number of nodes.\n :param divide_days: list, [ days of train data, days of test data], list to divide the original data.\n :param time_interval: int, time interval between two traffic data records (mins).---5 mins\n :param history_length: int, length of history data to be used.\n :param train_mode: list, [\"train\", \"test\"].\n \"\"\"\n\n self.data_path = data_path\n self.num_nodes = num_nodes\n self.train_mode = train_mode\n self.train_days = divide_days[0] # 59-14 = 45, train_data\n self.test_days = divide_days[1] # 7*2 = 14 ,test_data\n self.history_length = history_length # 30/5 = 6, 历史长度为6\n self.time_interval = time_interval # 5 min\n\n self.one_day_length = int(24 * 60 / self.time_interval) # 一整天的数据量\n\n self.graph = get_adjacent_matrix(distance_file=data_path[0], num_nodes=num_nodes)\n\n self.flow_norm, self.flow_data = self.pre_process_data(data=get_flow_data(data_path[1]),\n norm_dim=1) # self.flow_norm为归一化的基\n\n def __len__(self): # 表示数据集的长度\n \"\"\"\n :return: length of dataset (number of samples).\n \"\"\"\n if self.train_mode == \"train\":\n return self.train_days * self.one_day_length - self.history_length # 训练的样本数 = 训练集总长度 - 历史数据长度\n elif self.train_mode == \"test\":\n return self.test_days * self.one_day_length # 每个样本都能测试,测试样本数 = 测试总长度\n else:\n raise ValueError(\"train mode: [{}] is not defined\".format(self.train_mode))\n\n def __getitem__(self, index): # 功能是如何取每一个样本 (x, y), index = [0, L1 - 1]这个是根据数据集的长度确定的\n \"\"\"\n :param index: int, range between [0, length - 1].\n :return:\n graph: torch.tensor, [N, N].\n data_x: torch.tensor, [N, H, D].\n data_y: torch.tensor, [N, 1, D].\n \"\"\"\n if self.train_mode == \"train\":\n index = index # 训练集的数据是从时间0开始的,这个是每一个流量数据,要和样本(x,y)区别\n elif self.train_mode == \"test\":\n index += self.train_days * self.one_day_length # 有一个偏移量\n else:\n raise ValueError(\"train mode: [{}] is not defined\".format(self.train_mode))\n\n data_x, data_y = LoadData.slice_data(self.flow_data, self.history_length, index, self.train_mode) # 这个就是样本(x,y)\n\n data_x = LoadData.to_tensor(data_x) # [N, H, D] # 转换成张量\n data_y = LoadData.to_tensor(data_y).unsqueeze(1) # [N, 1, D] # 转换成张量,在时间维度上扩维\n\n return {\"graph\": LoadData.to_tensor(self.graph), \"flow_x\": data_x, \"flow_y\": data_y} # 组成词典返回\n\n @staticmethod\n def slice_data(data, history_length, index, train_mode): # 根据历史长度,下标来划分数据样本\n \"\"\"\n :param data: np.array, normalized traffic data.\n :param history_length: int, length of history data to be used.\n :param index: int, index on temporal axis.\n :param train_mode: str, [\"train\", \"test\"].\n :return:\n data_x: np.array, [N, H, D].\n data_y: np.array [N, D].\n \"\"\"\n if train_mode == \"train\":\n start_index = index # 开始下标就是时间下标本身,这个是闭区间\n end_index = index + history_length # 结束下标,这个是开区间\n elif train_mode == \"test\":\n start_index = index - history_length # 开始下标,这个最后面贴图了,可以帮助理解\n end_index = index # 结束下标\n else:\n raise ValueError(\"train model {} is not defined\".format(train_mode))\n\n data_x = data[:, start_index: end_index] # 在切第二维,不包括end_index\n data_y = data[:, end_index] # 把上面的end_index取上\n\n return data_x, data_y\n\n @staticmethod\n def pre_process_data(data, norm_dim): # 预处理,归一化\n \"\"\"\n :param data: 
np.array,原始的交通流量数据\n :param norm_dim: int,归一化的维度,就是说在哪个维度上归一化,这里是在dim=1时间维度上\n :return:\n norm_base: list, [max_data, min_data], 这个是归一化的基.\n norm_data: np.array, normalized traffic data.\n \"\"\"\n norm_base = LoadData.normalize_base(data, norm_dim) # 计算 normalize base\n norm_data = LoadData.normalize_data(norm_base[0], norm_base[1], data) # 归一化后的流量数据\n\n return norm_base, norm_data # 返回基是为了恢复数据做准备的\n\n @staticmethod\n def normalize_base(data, norm_dim): # 计算归一化的基\n \"\"\"\n :param data: np.array, 原始的交通流量数据\n :param norm_dim: int, normalization dimension.归一化的维度,就是说在哪个维度上归一化,这里是在dim=1时间维度上\n :return:\n max_data: np.array\n min_data: np.array\n \"\"\"\n max_data = np.max(data, norm_dim, keepdims=True) # [N, T, D] , norm_dim=1, [N, 1, D], keepdims=True就保持了纬度一致\n min_data = np.min(data, norm_dim, keepdims=True)\n\n return max_data, min_data # 返回最大值和最小值\n\n @staticmethod\n def normalize_data(max_data, min_data, data): # 计算归一化的流量数据,用的是最大值最小值归一化法\n \"\"\"\n :param max_data: np.array, max data.\n :param min_data: np.array, min data.\n :param data: np.array, original traffic data without normalization.\n :return:\n np.array, normalized traffic data.\n \"\"\"\n mid = min_data\n base = max_data - min_data\n normalized_data = (data - mid) / base\n\n return normalized_data\n\n @staticmethod\n def recover_data(max_data, min_data, data): # 恢复数据时使用的,为可视化比较做准备的\n \"\"\"\n :param max_data: np.array, max data.\n :param min_data: np.array, min data.\n :param data: np.array, normalized data.\n :return:\n recovered_data: np.array, recovered data.\n \"\"\"\n mid = min_data\n base = max_data - min_data\n\n recovered_data = data * base + mid\n\n return recovered_data # 这个就是原始的数据\n\n @staticmethod\n def to_tensor(data):\n return torch.tensor(data, dtype=torch.float)"
},
{
"identifier": "Evaluation",
"path": "utils.py",
"snippet": "class Evaluation(object):\n def __init__(self):\n pass\n\n @staticmethod\n def mae_(target, output):\n return np.mean(np.abs(target - output))\n\n @staticmethod\n def mape_(target, output):\n return np.mean(np.abs(target - output) / (target + 5)) # 加5是因为target有可能为0,当然只要不太大,加几都行\n\n @staticmethod\n def rmse_(target, output):\n return np.sqrt(np.mean(np.power(target - output, 2)))\n\n @staticmethod\n def total(target, output):\n mae = Evaluation.mae_(target, output)\n mape = Evaluation.mape_(target, output)\n rmse = Evaluation.rmse_(target, output)\n\n return mae, mape, rmse"
},
{
"identifier": "visualize_result",
"path": "utils.py",
"snippet": "def visualize_result(h5_file, nodes_id, time_se, visualize_file):\n file_obj = h5py.File(h5_file, \"r\") # 获得文件对象,这个文件对象有两个keys:\"predict\"和\"target\"\n prediction = file_obj[\"predict\"][:][:, :, 0] # [N, T],切片,最后一维取第0列,所以变成二维了,要是[:, :, :1]那么维度不会缩减\n target = file_obj[\"target\"][:][:, :, 0] # [N, T],同上\n file_obj.close()\n\n plot_prediction = prediction[nodes_id][time_se[0]: time_se[1]] # [T1],将指定节点的,指定时间的数据拿出来\n plot_target = target[nodes_id][time_se[0]: time_se[1]] # [T1],同上\n\n plt.figure()\n plt.grid(True, linestyle=\"-.\", linewidth=0.5)\n plt.plot(np.array([t for t in range(time_se[1] - time_se[0])]), plot_prediction, ls=\"-\", marker=\" \", color=\"r\")\n plt.plot(np.array([t for t in range(time_se[1] - time_se[0])]), plot_target, ls=\"-\", marker=\" \", color=\"b\")\n\n plt.legend([\"prediction\", \"target\"], loc=\"upper right\")\n\n plt.axis([0, time_se[1] - time_se[0],\n np.min(np.array([np.min(plot_prediction), np.min(plot_target)])),\n np.max(np.array([np.max(plot_prediction), np.max(plot_target)]))])\n\n plt.savefig(visualize_file + \".png\")"
},
{
"identifier": "GCN",
"path": "gcnnet.py",
"snippet": "class GCN(nn.Module): # GCN模型,向空域的第一个图卷积\n def __init__(self, in_c, hid_c, out_c):\n super(GCN, self).__init__() # 表示继承父类的所有属性和方法\n self.linear_1 = nn.Linear(in_c, hid_c) # 定义一个线性层\n self.linear_2 = nn.Linear(hid_c, out_c) # 定义一个线性层\n self.act = nn.ReLU() # 定义激活函数\n\n def forward(self, data, device):\n graph_data = data[\"graph\"].to(device)[0] # [N, N] 邻接矩阵,并且将数据送入设备\n graph_data = GCN.process_graph(graph_data) # 变换邻接矩阵 \\hat A = D_{-1/2}*A*D_{-1/2}\n\n flow_x = data[\"flow_x\"].to(device) # [B, N, H, D] 流量数据\n\n B, N = flow_x.size(0), flow_x.size(1) # batch_size、节点数\n\n flow_x = flow_x.view(B, N, -1) # [B, N, H*D] H = 6, D = 1把最后两维缩减到一起了,这个就是把历史时间的特征放一起\n\n # 第一个图卷积层\n output_1 = self.linear_1(flow_x) # [B, N, hid_C],这个就是 WX,其中W是可学习的参数,X是输入的流量数据(就是flow_x)\n output_1 = self.act(torch.matmul(graph_data, output_1)) # [B, N, N] ,[B, N, hid_c],就是 \\hat AWX\n\n # 第二个图卷积层\n output_2 = self.linear_2(output_1) # WX\n output_2 = self.act(torch.matmul(graph_data, output_2)) # [B, N, 1, Out_C] , 就是 \\hat AWX\n\n return output_2.unsqueeze(2) # 第2维的维度扩张\n\n @staticmethod\n def process_graph(graph_data): # 这个就是在原始的邻接矩阵之上,再次变换,也就是\\hat A = D_{-1/2}*A*D_{-1/2}\n N = graph_data.size(0) # 获得节点的个数\n matrix_i = torch.eye(N, dtype=torch.float, device=graph_data.device) # 定义[N, N]的单位矩阵\n graph_data += matrix_i # [N, N] ,就是 A+I\n\n degree_matrix = torch.sum(graph_data, dim=1, keepdim=False) # [N],计算度矩阵,塌陷成向量,其实就是将上面的A+I每行相加\n degree_matrix = degree_matrix.pow(-1) # 计算度矩阵的逆,若为0,-1次方可能计算结果为无穷大的数\n degree_matrix[degree_matrix == float(\"inf\")] = 0. # 让无穷大的数为0\n\n degree_matrix = torch.diag(degree_matrix) # 转换成对角矩阵\n\n return torch.mm(degree_matrix, graph_data) # 返回 \\hat A=D^(-1) * A ,这个等价于\\hat A = D_{-1/2}*A*D_{-1/2}"
},
{
"identifier": "ChebNet",
"path": "chebnet.py",
"snippet": "class ChebNet(nn.Module): # 定义图网络的类\n def __init__(self, in_c, hid_c, out_c, K):\n \"\"\"\n :param in_c: int, number of input channels.\n :param hid_c: int, number of hidden channels.class\n :param out_c: int, number of output channels.\n :param K:\n \"\"\"\n super(ChebNet, self).__init__()\n self.conv1 = ChebConv(in_c=in_c, out_c=hid_c, K=K) # 第一个图卷积层\n self.conv2 = ChebConv(in_c=hid_c, out_c=out_c, K=K) # 第二个图卷积层\n self.act = nn.ReLU() # 激活函数\n\n def forward(self, data, device):\n graph_data = data[\"graph\"].to(device)[0] # [N, N]\n flow_x = data[\"flow_x\"].to(device) # [B, N, H, D] # B是batch size,N是节点数,H是历史数据长度,D是特征维度\n\n B, N = flow_x.size(0), flow_x.size(1)\n\n flow_x = flow_x.view(B, N, -1) # [B, N, H*D] H = 6, D = 1把最后两维缩减到一起了,这个就是把历史时间的特征放一起\n\n output_1 = self.act(self.conv1(flow_x, graph_data))\n output_2 = self.act(self.conv2(output_1, graph_data))\n\n return output_2.unsqueeze(2) # 在第2维度,也就是时间维度上做扩张"
},
{
"identifier": "GATNet",
"path": "gat.py",
"snippet": "class GATNet(nn.Module):\n def __init__(self, in_c, hid_c, out_c, n_heads):\n super(GATNet, self).__init__()\n self.subnet = GATSubNet(in_c, hid_c, out_c, n_heads)\n\n def forward(self, data, device):\n graph = data[\"graph\"][0].to(device) # [N, N]\n flow = data[\"flow_x\"] # [B, N, T, C]\n flow = flow.to(device) # 将流量数据送入设备\n\n B, N = flow.size(0), flow.size(1)\n flow = flow.view(B, N, -1) # [B, N, T * C]\n \"\"\"\n 上面是将这一段的时间的特征数据摊平做为特征,这种做法实际上忽略了时序上的连续性\n 这种做法可行,但是比较粗糙,当然也可以这么做:\n flow[:, :, 0] ... flow[:, :, T-1] 则就有T个[B, N, C]这样的张量,也就是 [B, N, C]*T\n 每一个张量都用一个SubNet来表示,则一共有T个SubNet,初始化定义 self.subnet = [GATSubNet(...) for _ in range(T)]\n 然后用nn.ModuleList将SubNet分别拎出来处理,参考多头注意力的处理,同理\n\n \"\"\"\n\n prediction = self.subnet(flow, graph).unsqueeze(2) # [B, N, 1, C],这个1加上就表示预测的是未来一个时刻\n\n return prediction"
}
] | import os
import time
import h5py
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import warnings
from torch.utils.data import DataLoader
from traffic_dataset import LoadData
from utils import Evaluation  # the three evaluation metrics and the visualization class
from utils import visualize_result
from gcnnet import GCN
from chebnet import ChebNet
from gat import GATNet
from rich import print
from tqdm import tqdm | 7,301 | # Step 3: define the loss function and optimizer
    criterion = nn.MSELoss()  # mean squared error loss
    # no learning rate specified, so the optimizer uses its default, i.e. lr=1e-3
    optimizer = optim.Adam(params=my_net.parameters())
    # Step 4: training + testing
    # Train model
    Epoch = 20  # number of training epochs
    my_net.train()  # switch to training mode
    for epoch in tqdm(range(Epoch), colour="green", desc="Train"):
        epoch_loss = 0.0
        count = 0
        start_time = time.time()
        # ["graph": [B, N, N], "flow_x": [B, N, H, D], "flow_y": [B, N, 1, D]], fetch one batch of training data at a time
        for data in train_loader:
            my_net.zero_grad()  # reset the gradients
            count += 1
            # [B, N, 1, D]; since the label flow_y lives on the CPU, the prediction is moved back to the CPU
            predict_value = my_net(data, device).to(torch.device("cpu"))
            # compute the loss; keep in mind that this loss is not a scalar
            loss = criterion(predict_value, data["flow_y"])
            epoch_loss += loss.item()  # accumulate the loss over the whole epoch, then divide by the training set size to report an average loss
            loss.backward()  # backpropagation
            optimizer.step()  # update the parameters
end_time = time.time()
print("Epoch: {:04d}, Loss: {:02.4f}, Time: {:02.2f} mins".format(epoch, 1000 * epoch_loss / len(train_data),
(end_time - start_time) / 60))
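        # The value printed above is the epoch's summed loss divided by the number of training
        # samples and scaled by 1000; the scaling appears to be only for readability of small MSE values.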
# Test Model
    # For testing:
    # First, besides computing the loss, the predictions are also visualized (qualitative analysis)
    # Second, the predictions are evaluated with the three metrics MAE, MAPE, and RMSE (quantitative analysis)
    my_net.eval()  # switch to evaluation mode
    with torch.no_grad():  # disable gradient tracking
        MAE, MAPE, RMSE = [], [], []  # lists for the three metrics
        Target = np.zeros([307, 1, 1])  # [N, T, D], T=1; target array, initialized with zeros
        Predict = np.zeros_like(Target)  # [N, T, D], T=1; prediction array
total_loss = 0.0
        for data in test_loader:  # fetch one batch of test data at a time
            # The predictions below are still normalized; the three metrics and the visualization, however, need the de-normalized data
            # [B, N, 1, D]: B is the batch size, N the number of nodes, 1 the time step T=1, D the node flow feature
            predict_value = my_net(data, device).to(torch.device("cpu"))
            loss = criterion(predict_value, data["flow_y"])  # compute the loss with MSE
            total_loss += loss.item()  # accumulate the loss over all batches
            # Below, the batch dimension of predictions and targets becomes the second (time) dimension; since the test samples are not shuffled,
            # each batch comes out in temporal order, so interpreting the second dimension as time is legitimate.
predict_value = predict_value.transpose(0, 2).squeeze(
0) # [1, N, B(T), D] -> [N, B(T), D] -> [N, T, D]
target_value = data["flow_y"].transpose(0, 2).squeeze(
0) # [1, N, B(T), D] -> [N, B(T), D] -> [N, T, D]
            performance, data_to_save = compute_performance(
                predict_value, target_value, test_loader)  # evaluate the model; returns the metrics and the recovered (de-normalized) data
            # Concatenate the per-batch outputs along the batch dimension to obtain the data over the whole time span, i.e.
# [N, T, D] = [N, T1+T2+..., D]
Predict = np.concatenate([Predict, data_to_save[0]], axis=1)
Target = np.concatenate([Target, data_to_save[1]], axis=1)
MAE.append(performance[0])
MAPE.append(performance[1])
RMSE.append(performance[2])
print("Test Loss: {:02.4f}".format(1000 * total_loss / len(test_data)))
        # average the three metrics
        print("Performance: MAE {:2.2f} {:2.2f}% {:2.2f}".format(np.mean(MAE), np.mean(MAPE) * 100, np.mean(RMSE)))
    # Drop the leading zero column: the arrays were initialized with zeros, but time starts from 1
Predict = np.delete(Predict, 0, axis=1)
Target = np.delete(Target, 0, axis=1)
result_file = "GAT_result.h5"
    file_obj = h5py.File(result_file, "w")  # save the predictions and targets to a file, since the results will be visualized several times
file_obj["predict"] = Predict # [N, T, D]
file_obj["target"] = Target # [N, T, D]
def compute_performance(prediction, target, data):  # evaluate model performance
    # The try/except below handles the following: during training + testing the data always comes through a DataLoader, whose .dataset attribute can be used directly;
    # but when a trained model is saved and tested later, the data may not be wrapped that way, so it is used as a Dataset directly.
    try:
        dataset = data.dataset  # data is a DataLoader; its .dataset attribute gives the underlying Dataset
    except:
        dataset = data  # data is already a Dataset; use it as is
    # Below, predictions and targets are de-normalized; recover_data() was defined in the data-processing section
    # flow_norm is the normalization basis: flow_norm[0] is the maximum, flow_norm[1] the minimum
    # prediction.numpy() and target.numpy() are the data to de-normalize; they are converted to numpy because recover_data() operates on numpy arrays
prediction = LoadData.recover_data(
dataset.flow_norm[0], dataset.flow_norm[1], prediction.numpy())
target = LoadData.recover_data(
dataset.flow_norm[0], dataset.flow_norm[1], target.numpy())
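    # For reference, recover_data() inverts the min-max scaling applied during preprocessing:
    #   normalized = (x - min) / (max - min)  =>  x = normalized * (max - min) + min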
    # The three evaluation metrics are wrapped in a class that lives in a separate file (shown later)
    mae, mape, rmse = Evaluation.total(
        target.reshape(-1), prediction.reshape(-1))  # flatten into plain vectors before computing the three metrics
performance = [mae, mape, rmse]
recovered_data = [prediction, target]
    return performance, recovered_data  # return the metrics and the recovered data (prepared for visualization)
if __name__ == '__main__':
main()
    # Visualization, via the Evaluation() class below; here the results of the GAT run are visualized
    # To visualize GCN or ChebNet instead, simply switch the commented model line (line 45) to the corresponding algorithm
| # @Time : 2020/8/25
# @Author : LeronQ
# @github : https://github.com/LeronQ
# PyTorch traffic flow prediction based on GCN/GAT/ChebNet graph neural networks (with code): https://blog.csdn.net/yilulvxing/article/details/110306999
# traffic_prediction.py
# This is the hand-written data-processing class from the previous section, wrapped in the traffic_dataset.py file
warnings.filterwarnings('ignore')
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # 配置GPU,因为可能有多个GPU,这里用了第0号GPU
# 第一步:准备数据(上一节已经准备好了,这里只是调用而已,链接在最开头)
train_data = LoadData(data_path=["PeMS_04/PeMS04.csv", "PeMS_04/PeMS04.npz"], num_nodes=307, divide_days=[45, 14],
time_interval=5, history_length=6,
train_mode="train")
    # num_workers is the number of worker threads used to load the (batched) data
train_loader = DataLoader(
train_data, batch_size=32, shuffle=True, num_workers=4)
test_data = LoadData(data_path=["PeMS_04/PeMS04.csv", "PeMS_04/PeMS04.npz"], num_nodes=307, divide_days=[45, 14],
time_interval=5, history_length=6,
train_mode="test")
test_loader = DataLoader(test_data, batch_size=32,
shuffle=False, num_workers=4)
print("🚀🚀🚀 [italic bold green]数据加载完成!!!")
# SECTION: 第二步:定义模型(这里其实只是加载模型,关于模型的定义在下面单独写了,先假设已经写好)
my_net = GCN(in_c=6, hid_c=6, out_c=1) # 加载GCN模型
# my_net = ChebNet(in_c=6, hid_c=6, out_c=1, K=2) # 加载ChebNet模型
# my_net = GATNet(in_c=6 * 1, hid_c=6, out_c=1, n_heads=2) # 加载GAT模型
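    # All three models expose the same forward(data, device) interface and, with out_c=1,
    # return a [B, N, 1, 1] prediction, so they can be swapped by toggling the lines above.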
print(my_net)
    device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")  # select the device
    my_net = my_net.to(device)  # move the model to the device
    # Step 3: define the loss function and optimizer
    criterion = nn.MSELoss()  # mean squared error loss
    # no learning rate specified, so the optimizer uses its default, i.e. lr=1e-3
    optimizer = optim.Adam(params=my_net.parameters())
    # Step 4: training + testing
    # Train model
    Epoch = 20  # number of training epochs
    my_net.train()  # switch to training mode
    for epoch in tqdm(range(Epoch), colour="green", desc="Train"):
        epoch_loss = 0.0
        count = 0
        start_time = time.time()
        # ["graph": [B, N, N], "flow_x": [B, N, H, D], "flow_y": [B, N, 1, D]], fetch one batch of training data at a time
        for data in train_loader:
            my_net.zero_grad()  # reset the gradients
            count += 1
            # [B, N, 1, D]; since the label flow_y lives on the CPU, the prediction is moved back to the CPU
            predict_value = my_net(data, device).to(torch.device("cpu"))
            # compute the loss; keep in mind that this loss is not a scalar
            loss = criterion(predict_value, data["flow_y"])
            epoch_loss += loss.item()  # accumulate the loss over the whole epoch, then divide by the training set size to report an average loss
            loss.backward()  # backpropagation
            optimizer.step()  # update the parameters
end_time = time.time()
print("Epoch: {:04d}, Loss: {:02.4f}, Time: {:02.2f} mins".format(epoch, 1000 * epoch_loss / len(train_data),
(end_time - start_time) / 60))
# Test Model
    # For testing:
    # First, besides computing the loss, the predictions are also visualized (qualitative analysis)
    # Second, the predictions are evaluated with the three metrics MAE, MAPE, and RMSE (quantitative analysis)
    my_net.eval()  # switch to evaluation mode
    with torch.no_grad():  # disable gradient tracking
        MAE, MAPE, RMSE = [], [], []  # lists for the three metrics
        Target = np.zeros([307, 1, 1])  # [N, T, D], T=1; target array, initialized with zeros
        Predict = np.zeros_like(Target)  # [N, T, D], T=1; prediction array
total_loss = 0.0
        for data in test_loader:  # fetch one batch of test data at a time
            # The predictions below are still normalized; the three metrics and the visualization, however, need the de-normalized data
            # [B, N, 1, D]: B is the batch size, N the number of nodes, 1 the time step T=1, D the node flow feature
            predict_value = my_net(data, device).to(torch.device("cpu"))
            loss = criterion(predict_value, data["flow_y"])  # compute the loss with MSE
            total_loss += loss.item()  # accumulate the loss over all batches
            # Below, the batch dimension of predictions and targets becomes the second (time) dimension; since the test samples are not shuffled,
            # each batch comes out in temporal order, so interpreting the second dimension as time is legitimate.
predict_value = predict_value.transpose(0, 2).squeeze(
0) # [1, N, B(T), D] -> [N, B(T), D] -> [N, T, D]
target_value = data["flow_y"].transpose(0, 2).squeeze(
0) # [1, N, B(T), D] -> [N, B(T), D] -> [N, T, D]
            performance, data_to_save = compute_performance(
                predict_value, target_value, test_loader)  # evaluate the model; returns the metrics and the recovered (de-normalized) data
            # Concatenate the per-batch outputs along the batch dimension to obtain the data over the whole time span, i.e.
# [N, T, D] = [N, T1+T2+..., D]
Predict = np.concatenate([Predict, data_to_save[0]], axis=1)
Target = np.concatenate([Target, data_to_save[1]], axis=1)
MAE.append(performance[0])
MAPE.append(performance[1])
RMSE.append(performance[2])
print("Test Loss: {:02.4f}".format(1000 * total_loss / len(test_data)))
        # average the three metrics
        print("Performance: MAE {:2.2f} {:2.2f}% {:2.2f}".format(np.mean(MAE), np.mean(MAPE) * 100, np.mean(RMSE)))
    # Drop the leading zero column: the arrays were initialized with zeros, but time starts from 1
Predict = np.delete(Predict, 0, axis=1)
Target = np.delete(Target, 0, axis=1)
result_file = "GAT_result.h5"
    file_obj = h5py.File(result_file, "w")  # save the predictions and targets to a file, since the results will be visualized several times
file_obj["predict"] = Predict # [N, T, D]
file_obj["target"] = Target # [N, T, D]
def compute_performance(prediction, target, data):  # evaluate model performance
    # The try/except below handles the following: during training + testing the data always comes through a DataLoader, whose .dataset attribute can be used directly;
    # but when a trained model is saved and tested later, the data may not be wrapped that way, so it is used as a Dataset directly.
    try:
        dataset = data.dataset  # data is a DataLoader; its .dataset attribute gives the underlying Dataset
    except:
        dataset = data  # data is already a Dataset; use it as is
    # Below, predictions and targets are de-normalized; recover_data() was defined in the data-processing section
    # flow_norm is the normalization basis: flow_norm[0] is the maximum, flow_norm[1] the minimum
    # prediction.numpy() and target.numpy() are the data to de-normalize; they are converted to numpy because recover_data() operates on numpy arrays
prediction = LoadData.recover_data(
dataset.flow_norm[0], dataset.flow_norm[1], prediction.numpy())
target = LoadData.recover_data(
dataset.flow_norm[0], dataset.flow_norm[1], target.numpy())
    # The three evaluation metrics are wrapped in a class that lives in a separate file (shown later)
    mae, mape, rmse = Evaluation.total(
        target.reshape(-1), prediction.reshape(-1))  # flatten into plain vectors before computing the three metrics
performance = [mae, mape, rmse]
recovered_data = [prediction, target]
    return performance, recovered_data  # return the metrics and the recovered data (prepared for visualization)
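# Note: per the Evaluation class, MAE and RMSE are reported in the original (de-normalized)
# flow units, while mape_ returns a fraction, which is why the performance print above
# multiplies it by 100 before showing it as a percentage.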
if __name__ == '__main__':
main()
    # Visualization, via the Evaluation() class below; here the results of the GAT run are visualized
    # To visualize GCN or ChebNet instead, simply switch the commented model line (line 45) to the corresponding algorithm | visualize_result(h5_file="GAT_result.h5", | 2 | 2023-12-05 07:25:35+00:00 | 12k
nickruggeri/hypergraph-message-passing | test/model/conftest.py | [
{
"identifier": "hye_list_to_binary_incidence",
"path": "src/data/conversion.py",
"snippet": "def hye_list_to_binary_incidence(\n hye_list: list[tuple[int]], shape: tuple[int] | None = None\n) -> sparse.coo_array:\n \"\"\"Convert a list of hyperedges into a scipy sparse COO array.\n The hyperedges need to be list of integers, representing nodes, starting from 0.\n If no shape is provided, this is inferred from the hyperedge list as (N, E).\n N is the number of nodes, given by the maximum integer observed in the hyperedge\n list plus one (since the node index starts from 0).\n E is the number of hyperedges in the list.\n If not None, the shape can only specify a tuple (N', E') where N' is greater or\n equal than the N inferred from the hyperedge list, and E' is greater or equal than\n the number of hyperedges in the list.\n\n Parameters\n ----------\n hye_list: the list of hyperedges.\n Every hyperedge is represented as a tuple of integer nodes.\n shape: the shape of the adjacency matrix, passed to the array constructor.\n If None, it is inferred.\n\n Returns\n -------\n The binary adjacency matrix representing the hyperedges.\n \"\"\"\n rows = []\n columns = []\n for j, hye in enumerate(hye_list):\n # If there are repeated nodes in the hyperedge, count them once\n set_hye = set(hye)\n rows.extend(list(set_hye))\n columns.extend([j] * len(set_hye))\n\n inferred_N = max(rows) + 1\n inferred_E = len(hye_list)\n if shape is not None:\n if shape[0] < inferred_N or shape[1] < inferred_E:\n raise ValueError(\n \"Provided shape incompatible with configurations hyperedge list.\"\n )\n else:\n shape = (inferred_N, inferred_E)\n\n data = np.ones_like(rows)\n\n return sparse.coo_array((data, (rows, columns)), shape=shape, dtype=np.uint8)"
},
{
"identifier": "BinaryHypergraph",
"path": "src/data/representation/binary_hypergraph.py",
"snippet": "class BinaryHypergraph(ABC):\n \"\"\"Abstract class for the representation of hypergraphs with binary hyperedges.\"\"\"\n\n N: int # Number of nodes.\n E: int # Number of hyperedges.\n max_hye_size: int # Maximum size of the hyperedges in the hypergraph.\n hye_count: dict[\n int, int\n ] # Hyperedges divided by hyperedge size, as (key, value) pairs: (size, count).\n\n @abstractmethod\n def get_repr(self) -> Any:\n \"\"\"Return the internal representation of the hypergraph.\"\"\"\n\n @abstractmethod\n def get_binary_incidence_matrix(self) -> Any:\n \"\"\"Return the incidence matrix B with only zeros and ones.\"\"\"\n\n @abstractmethod\n def __iter__(self) -> Iterable[Any]:\n \"\"\"Create an iterable that yields the hyperedges.\"\"\"\n\n def sub_hyg(self, *args: Any) -> BinaryHypergraph:\n \"\"\"Return a sub-hypergraph representation.\"\"\"\n raise NotImplementedError(f\"Not implemented for instance of {self.__class__}\")\n\n def save_to_txt(self, file_path: str | Path) -> None:\n file_path = Path(file_path)\n\n with open(file_path, \"w\") as hye_file:\n for hye, _ in self:\n hye_file.write(\" \".join(map(str, hye)) + \"\\n\")\n\n def load_from_txt(self, *args, **kwargs) -> Any:\n \"\"\"Load the hypergraph from external sources.\"\"\"\n raise NotImplementedError(f\"Not implemented for instance of {self.__class__}\")\n\n def max_hye_size_select(self, max_size: int) -> BinaryHypergraph:\n \"\"\"Return a sub-hypergraph where hyperedges with size exceeding the one\n specified are discarded.\n\n Parameters\n ----------\n max_size: maximum hyperedge size allowed.\n\n Returns\n -------\n The sub-hypergraph where the hyperedges bigger than max_size are discarded.\n \"\"\"\n incidence = self.get_binary_incidence_matrix()\n sizes = incidence.sum(axis=0)\n hye_idx = np.arange(self.E)[sizes <= max_size]\n return self.sub_hyg(hye_idx)"
},
{
"identifier": "IncidenceHypergraph",
"path": "src/data/representation/incidence_hypergraph.py",
"snippet": "class IncidenceHypergraph(BinaryHypergraph):\n \"\"\"Representation of a binary hypergraph via its incidence matrix.\n The incidence matrix B is of size N x E, with N number of nodes in the hypergraph\n and E number of hyperedges. For each hyperedge e, the column of B with index e\n contains ones for the nodes belonging to the hyperedge e, zeros for all other nodes.\n \"\"\"\n\n def __init__(\n self,\n B: np.ndarray | sparse.spmatrix,\n sort_indices: bool = True,\n ):\n \"\"\"\n Parameters\n ----------\n B: incidence matrix, of shape (N, E).\n sort_indices: sort the indices in the internal sparse matrix representation.\n \"\"\"\n self.B = self._check_and_convert_incidence(B, sort_indices)\n self.N, self.E = self.B.shape\n\n hye_lengths = self.B.sum(axis=0)\n hye_counter = dict(Counter(hye_lengths))\n self.hye_count = hye_counter\n self.max_hye_size = max(hye_counter.keys())\n\n def get_repr(self) -> TYPE_INCIDENCE:\n return self.B\n\n def get_binary_incidence_matrix(self) -> TYPE_INCIDENCE:\n return self.B\n\n def sub_hyg(\n self,\n hyperedge_idx: np.ndarray | None = None,\n ) -> IncidenceHypergraph:\n \"\"\"Produce a sub-hypergraph where only the specified hyperedges are present.\n\n Parameters\n ----------\n hyperedge_idx: the list of the hyperedges to keep, specified by their indices.\n\n Returns\n -------\n The sub-hypergraph instance.\n \"\"\"\n if hyperedge_idx is None:\n return self\n\n B = self.B[:, hyperedge_idx]\n\n return IncidenceHypergraph(B)\n\n def __iter__(self) -> Iterable[np.ndarray]:\n return incidence_matrix_to_hye(self.B)\n\n def __str__(self):\n return f\"{self.__class__.__name__} with N={self.N}, E={self.E}\"\n\n @classmethod\n def load_from_txt(\n cls,\n hye_file: str | Path,\n N: int | None = None,\n ) -> IncidenceHypergraph:\n \"\"\"Load a IncidenceHypergraph instance from a txt file, containing the list of\n hyperedges.\n\n Parameters\n ----------\n hye_file: text file containing the hyperedges.\n N: number of nodes in the hypergraph.\n\n Returns\n -------\n An instance of IncidenceHypergraph.\n \"\"\"\n with open(hye_file, \"r\") as file:\n hye = (map(int, line.split(\" \")) for line in file.readlines())\n\n return cls.load_from_hye_list(hye, N)\n\n @classmethod\n def load_from_hye_list(\n cls, hye_list: list[Iterable[int]], N: int | None\n ) -> IncidenceHypergraph:\n hye = list(set(tuple(sorted(set(hyperedge))) for hyperedge in hye_list))\n shape = (N, len(hye)) if N else None\n B = hye_list_to_binary_incidence(hye, shape=shape)\n\n return IncidenceHypergraph(B)\n\n @staticmethod\n def _check_and_convert_incidence(\n incidence: np.ndarray | sparse.spmatrix, sort_indices: bool\n ) -> TYPE_INCIDENCE:\n incidence = TYPE_INCIDENCE(incidence)\n # When converting to other sparse types, repeated entries are summed. In such\n # case, there could be entries different from 1. Set them to 1.\n # Similarly, if a weighted matrix is provided as configurations, flatten all non-zero\n # entries to 1.\n if not np.all(incidence.data == 1):\n warnings.warn(\n \"The configurations matrix contains elements different from 0 and 1. \"\n \"All non-zero elements will be converted to 1.\"\n )\n incidence = incidence > 0\n\n if not np.all(incidence.data == 1):\n raise ValueError(\"The incidence matrix can only contain 1 and 0 values.\")\n\n if sort_indices:\n incidence.sort_indices()\n\n return incidence"
},
{
"identifier": "HypergraphBlockModel",
"path": "src/model/hypergraph_block_model.py",
"snippet": "class HypergraphBlockModel:\n \"\"\"Hypergraph version of the Stochastic Block Model, introduced in\n\n \"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order\n Information\", Ruggeri et al.\n\n\n This probabilistic model for hypergraphs partitions the nodes into K hard\n communities, specified by an array of assignments t. The communities interact\n through a symmetric affinity matrix p, with shape (K, K). Together, the community\n assignments t and the affinity matrix p define the Bernoulli probability of the\n single hyperedges to be observed or not.\n \"\"\"\n\n def __init__(\n self,\n n: np.ndarray | None,\n p: np.ndarray | None,\n N: int,\n K: int,\n max_hye_size: int | None,\n ) -> None:\n r\"\"\"Stochastic Block Model for Hypergraphs.\n This version of SBM considers, for every node i, hard community assignments\n :math::`t_i`, i.e. categorical assignments to one out of K communities.\n Together with a (K, K) affinity matrix, these two parameters define the\n likelihood for unweighted hypergraphs (i.e. hyperedges have weights in {0, 1}).\n A prior :math::`n=(n_1, \\ldots, n_K)` for the community assignments can also be\n specified.\n\n Parameters\n ----------\n n: array of prior parameters for the communities.\n If specified, this array is used as initialization for EM inference,\n otherwise it is initialized at random.\n The array has length K equal to the number of communities, and specifies the\n categorical prior probabilities.\n p: symmetric matrix of community interaction probabilities.\n If specified, this matrix is used as initialization for EM inference,\n otherwise it is initialized at random.\n The matrix has shape (K, K), where K is the number of communities, and\n contains the inter and intra-community interaction probabilities,\n constrained to the [0, 1] interval.\n N: number of nodes.\n K: number of communities.\n max_hye_size: maximum size of the hyperedges D.\n Notice that this quantity is used to infer probabilistic quantities in the\n model, but is not checked against input hypergraphs.\n \"\"\"\n\n # Model related attributes\n self._check_params(n, p, K, N, max_hye_size)\n self.n = n.copy() if n is not None else None\n self.p = p.copy() if p is not None else None\n self.N = N\n self.K = K\n self.max_hye_size: int = max_hye_size if max_hye_size is not None else N\n\n # Quantities inferred after message passing.\n # log of the messages from hyperedges to nodes. Stored as lists of sparse\n # matrices. For every hyperedge e and node i, the matrix at position a in the\n # list contains the messages from e to i, for community assignment a.\n self.log_hye_to_node: list[TYPE_HYE_TO_NODE] | None = None\n # log of the messages from nodes to hyperedges.\n # They are encoded similarly to the messages above.\n self.log_node_to_hye: list[TYPE_NODE_TO_HYE] | None = None\n # Other quantities, log-marginals and external field\n self.log_marginals: np.ndarray | None = None\n self.external_field: np.ndarray | None = None\n\n # Training diagnostics.\n self.training_iter: int | None = None\n self.n_diff: list[float] = []\n self.c_diff: list[float] = []\n self.log_marginal_diff: list[list[float]] = []\n\n # Random number generator.\n self.rng: np.random.Generator = np.random.default_rng()\n\n @property\n def c(self):\n \"\"\"Return the rescaled affinity matrix c, defined as\n .. 
math::\n c = N p\n where N is the number of nodes and p the affinity matrix.\n \"\"\"\n return self.p * self.N\n\n def em_inference(\n self,\n hypergraph: IncidenceHypergraph,\n em_iter: int = 20,\n em_thresh: float = 1e-5,\n mp_iter: int = 2000,\n mp_thresh: float = 1e-5,\n mp_patience: int = 50,\n seed: int | None = None,\n dirichlet_alpha: float | None = None,\n dropout: float = 0.99,\n ) -> None:\n \"\"\"Perform Expectation Maximization (EM) inference on a hypergraph.\n The inference routine consist of alternating message passing, where the\n community assignments :math::`t_i` are inferred, and updates to the global\n parameters, i.e. the affinity matrix w and community priors n.\n If the affinity w or priors n are provided at initialization of the model, these\n are not inferred, but kept fixed.\n\n Parameters\n ----------\n hypergraph: hypergraph to perform inference on.\n em_iter: maximum number of EM iterations.\n One iteration consists of the message passing routine plus the global\n parameter updates.\n em_thresh: threshold for EM convergence.\n The threshold is computed over the absolute difference of the community\n priors and the affinity matrix between two consecutive EM iterations.\n mp_iter: maximum number of message passing iterations.\n mp_thresh: threshold for message passing convergence.\n The threshold is computed over the absolute difference of the log-marginals\n between two consecutive iterations.\n mp_patience: number of steps below the mp_thresh.\n After a number of consecutive iterations, specified by patience, with an\n absolute change in log-marginals below the mp_thresh, the message passing\n procedure is stopped.\n seed: random seed.\n dirichlet_alpha: parameter for the Dirichlet distribution.\n Utilized for the initialization of the messages, which are drawn from a\n uniform Dirichlet distribution with parameter alpha.\n If None, alpha is chosen automatically.\n dropout: dropout rate.\n The dropout rate it the number of randomly discarded updates in the messages\n and marginals. 
At every iteration of message passing, these discarded values\n are kept at the previous iteration value.\n \"\"\"\n if seed is not None:\n self.rng = np.random.default_rng(seed)\n self._check_hypergraph_vs_model_params(hypergraph)\n\n if self.n is None:\n fixed_n = False\n self._random_init_n()\n logging.info(f\"Initialized n prior:\\n{self.n}\")\n else:\n fixed_n = True\n\n if self.p is None:\n fixed_p = False\n self._random_init_p()\n logging.info(f\"Initialized rescaled affinity c=N*p:\\n{self.c}\")\n else:\n fixed_p = True\n\n for it in range(em_iter):\n logging.info(f\"EM iteration {it}\")\n\n # Local parameters: message passing.\n self.parallel_message_passing(\n hypergraph,\n mp_iter=mp_iter,\n mp_thresh=mp_thresh,\n patience=mp_patience,\n warm_start=True,\n seed=None, # keep the current random number generator unaltered.\n dirichlet_alpha=dirichlet_alpha,\n dropout=dropout,\n )\n\n # Global parameters: EM updates.\n if not fixed_n or not fixed_p:\n logging.info(\"\\tUpdates of priors n and affinity p...\")\n if not fixed_n:\n old_n = self.n.copy()\n self.n = self.updated_community_prior()\n self.n_diff.append(np.abs(old_n - self.n).sum())\n logging.info(\n f\"\\tCommunity prior:\\n{self.n}\"\n \"\\n\\tDifference from previous iteration: \"\n f\"{self.n_diff[-1]}\"\n )\n if not fixed_p:\n old_c = self.c.copy()\n self.p = self.updated_affinity_matrix(hypergraph)\n self.c_diff.append(np.abs(old_c - self.c).sum())\n logging.info(\n f\"\\tRescaled affinity matrix c=N*p:\\n{self.c}\"\n \"\\n\\tDifference from previous iteration:\"\n f\"{self.c_diff[-1]}\"\n )\n\n self.training_iter = it + 1\n\n if not fixed_n or not fixed_p:\n param_diff = 0.0\n if not fixed_n:\n param_diff += self.n_diff[-1]\n if not fixed_p:\n param_diff += self.c_diff[-1]\n if param_diff <= em_thresh:\n logging.info(\n \"Expectation-maximization threshold passed. \"\n \"inference terminated.\"\n )\n break\n\n def parallel_message_passing(\n self,\n hypergraph: IncidenceHypergraph,\n mp_iter: int = 2000,\n mp_thresh: float = 1.0e-5,\n dirichlet_alpha: float | None = None,\n dropout: float = 0.99,\n patience: int = 50,\n seed: int | None = None,\n warm_start: bool = True,\n ) -> None:\n \"\"\"Perform message passing inference of the node assignments.\n\n Parameters\n ----------\n hypergraph: a hypergraph.\n mp_iter: maximum number of message passing iterations.\n mp_thresh: threshold for message passing convergence.\n The threshold is computed over the absolute difference of the log-marginals\n between two consecutive iterations.\n dirichlet_alpha: parameter for the Dirichlet distribution.\n Utilized for the initialization of the messages, which are drawn from a\n uniform Dirichlet distribution with parameter alpha.\n If None, alpha is chosen automatically.\n dropout: dropout rate.\n The dropout rate it the number of randomly discarded updates in the messages\n and marginals. 
At every iteration of message passing, these discarded values\n are kept at the previous iteration value.\n patience: number of steps below the mp_thresh.\n After a number of consecutive iterations, specified by patience, with an\n absolute change in log-marginals below the mp_thresh, the message passing\n procedure is stopped.\n seed: random seed.\n warm_start: whether to re-initialize the messages and marginal beliefs.\n \"\"\"\n logging.info(\"\\tMessage passing...\")\n if seed is not None:\n self.rng = np.random.default_rng(seed)\n self._check_hypergraph_vs_model_params(hypergraph)\n\n all_messages_init = (\n self.log_hye_to_node is not None\n and self.log_node_to_hye is not None\n and self.log_marginals is not None\n and self.external_field is not None\n )\n\n if not warm_start or not all_messages_init:\n alpha = 10.0 * self.K if dirichlet_alpha is None else dirichlet_alpha\n self._init_message_passing(hypergraph, dirichlet_alpha=alpha)\n logging.debug(\n f\"\\t\\tInitialized hye to node:\\n{self.log_hye_to_node[0].data[:5]}\"\n )\n logging.debug(\n f\"\\t\\tInitialized node to hye:\\n{self.log_node_to_hye[0].data[:5]}\"\n )\n logging.debug(f\"\\t\\tInitialized marginals:\\n{self.log_marginals[:5]}\")\n logging.debug(f\"\\t\\tInitialized external field:\\n{self.external_field}\")\n\n self.log_marginal_diff.append(list())\n patience_count = 0\n for i in range(mp_iter):\n old_log_marginals = self.log_marginals.copy()\n self._parallel_message_passing_step(hypergraph, dropout)\n self.log_marginal_diff[-1].append(\n np.abs(old_log_marginals - self.log_marginals).sum()\n )\n logging.info(\n f\"\\t\\tMP step {i} - difference in log-marginals from previous iter: \"\n f\"{self.log_marginal_diff[-1][-1]}\"\n )\n\n if self.log_marginal_diff[-1][-1] <= mp_thresh:\n patience_count += 1\n else:\n patience_count = 0\n\n if patience_count == patience:\n logging.info(\n \"\\tMessage passing threshold passed. Message passing terminated.\"\n )\n break\n\n def _parallel_message_passing_step(\n self,\n hypergraph: IncidenceHypergraph,\n dropout: float = 0.99,\n ) -> None:\n \"\"\"Perform one step of message passing, updating the messages from nodes to\n factors, the messages from factors to nodes, the marginal probabilities and\n external field.\"\"\"\n inc = hypergraph.get_binary_incidence_matrix()\n\n # Update node to hye.\n new_node_to_hye = [None] * self.K\n for assignment in range(self.K):\n col_sum = self.log_hye_to_node[assignment].sum(axis=1)\n assert col_sum.shape == (self.N,)\n col_sum += np.log(self.n[assignment]) - self.external_field[assignment]\n col_sum = col_sum.reshape((self.N, 1))\n new_node_to_hye[assignment] = (\n TYPE_HYE_TO_NODE(inc * col_sum) - self.log_hye_to_node[assignment]\n )\n\n norm = sparse_reduce_lse(*new_node_to_hye)\n for assignment in range(self.K):\n new_node_to_hye[assignment].data -= norm.data\n new_node_to_hye[assignment].data = np.clip(\n new_node_to_hye[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX\n )\n\n # TODO dropout could be made more efficient here. 
Do it or not?\n if dropout > 0:\n non_dropout_mask = (\n self.rng.random(len(self.log_node_to_hye[0].data)) >= dropout\n )\n for assignment in range(self.K):\n self.log_node_to_hye[assignment].data[\n non_dropout_mask\n ] = new_node_to_hye[assignment].data[non_dropout_mask]\n else:\n for assignment in range(self.K):\n self.log_node_to_hye[assignment].data = new_node_to_hye[assignment].data\n\n logging.debug(f\"\\t\\tUpdated node to hye:\\n{self.log_node_to_hye[0].data[:5]}\")\n\n # Update hye to node.\n if dropout > 0:\n non_dropout_mask = (\n self.rng.random(len(self.log_hye_to_node[0].data)) >= dropout\n )\n else:\n non_dropout_mask = None\n new_hye_to_node = [\n TYPE_HYE_TO_NODE(x)\n for x in compute_psi_dynamic_programming(\n hypergraph=hypergraph,\n model=self,\n mask=non_dropout_mask,\n )\n ]\n\n norm = sparse_reduce_lse(*new_hye_to_node)\n for assignment in range(self.K):\n new_hye_to_node[assignment].data -= norm.data\n new_hye_to_node[assignment].data = np.clip(\n new_hye_to_node[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX\n )\n\n for assignment in range(self.K):\n self.log_hye_to_node[assignment].data[non_dropout_mask] = new_hye_to_node[\n assignment\n ].data\n\n logging.debug(f\"\\t\\tUpdated hye to node:\\n{self.log_hye_to_node[0].data[:5]}\")\n\n # Update marginals.\n new_marginals = []\n for assignment in range(self.K):\n col_sum = self.log_hye_to_node[assignment].sum(axis=1)\n assert col_sum.shape == (self.N,)\n col_sum += np.log(self.n[assignment]) - self.external_field[assignment]\n new_marginals.append(col_sum)\n new_marginals = np.stack(new_marginals, axis=1)\n assert new_marginals.shape == (self.N, self.K)\n\n new_marginals = new_marginals - special.logsumexp(\n new_marginals, axis=1, keepdims=True\n )\n new_marginals = np.clip(new_marginals, a_min=CLIP_MIN, a_max=CLIP_MAX)\n\n if dropout > 0:\n non_dropout_mask = self.rng.random(self.N) >= dropout\n self.log_marginals[non_dropout_mask] = new_marginals[non_dropout_mask]\n else:\n self.log_marginals = new_marginals\n\n logging.debug(f\"\\t\\tUpdated marginals:\\n{self.log_marginals[:5]}\")\n\n # Update external field.\n lse_term = special.logsumexp(\n a=self.log_marginals.reshape((self.N, self.K, 1)),\n b=self.c.reshape(1, self.K, self.K),\n axis=(0, 1),\n )\n assert lse_term.shape == (self.K,)\n\n C_prime = compute_C_prime(self.max_hye_size)\n self.external_field = C_prime / self.N * np.exp(lse_term)\n logging.debug(f\"\\t\\tUpdated external field:\\n{self.external_field}\")\n\n def updated_community_prior(self) -> np.ndarray:\n \"\"\"Parameter updates for the community priors n during EM inference.\n\n Returns\n -------\n The updated array of community priors.\n \"\"\"\n assignments = self.community_assignments()\n comm, counts = np.unique(assignments, return_counts=True)\n\n n = np.zeros(self.K)\n n[comm] = counts / self.N\n return np.clip(n, a_min=1.0e-20, a_max=1.0)\n\n def updated_affinity_matrix(self, hypergraph: IncidenceHypergraph) -> np.ndarray:\n \"\"\"Parameter updates for the affinity matrix p during EM inference.\n\n Parameters\n ----------\n hypergraph: a hypergraph.\n\n Returns\n -------\n The updated affinity matrix.\n \"\"\"\n # Numerator.\n pi, interactions = self.hye_pi(hypergraph, return_interactions=True)\n numerator = np.tensordot(\n interactions, 1 / np.clip(pi, a_min=1.0e-20, a_max=None), axes=(0, 0)\n )\n assert numerator.shape == (self.K, self.K)\n\n # Denominator.\n C_prime = compute_C_prime(self.max_hye_size)\n denominator = (\n self.N * C_prime * (self.N * np.outer(self.n, self.n) - 
np.diag(self.n))\n )\n\n p = self.p * 2 * numerator / denominator\n return np.clip(p, a_min=1e-20, a_max=0.99)\n\n def community_assignments(self):\n marginals = self.log_marginals\n return np.argmax(marginals, axis=1)\n\n def compute_external_field(self) -> np.array:\n r\"\"\"Compute the approximate external field, defined as\n .. math::\n h(t_i) :=\n \\frac{C'}{N}\n \\sum_{j \\in V} \\sum_{t_j} c_{t_i t_j} q_j(t_j)\n where\n .. math::\n C' = \\sum_{d=2}^D \\binom{N-2}{d-2} \\frac{1}{\\kappa_d}\n\n Returns\n -------\n The external field h.\n \"\"\"\n log_marginals = self.log_marginals\n c = self.c\n K = self.K\n N = self.N\n C_prime = compute_C_prime(self.max_hye_size)\n\n external_field = special.logsumexp(\n a=log_marginals.reshape(N, 1, K), b=c.reshape(1, K, K), axis=(0, 2)\n )\n assert external_field.shape == (K,)\n return C_prime / N * np.exp(external_field)\n\n def single_hye_pi(self, assignments: Iterable[int]) -> float:\n r\"\"\"Compute the hyperedge unnormalized probability.\n For a hyperedge e and community assignments t, the unnormalized probability is\n given by\n .. math::\n \\pi_e := \\sum_{i < j \\in e} p_{t_i t_j}\n\n Parameters\n ----------\n assignments: community assignments.\n This array contains the community assignments :math::`t_i` (with values\n between 0 and K-1, where K is the number of communities) for all nodes i in\n the hyperedge.\n\n Returns\n -------\n The value of :math::`\\pi_e`.\n \"\"\"\n K = self.K\n hye_comm_counts = [0] * K\n counts = Counter(assignments)\n for comm, count in counts.items():\n hye_comm_counts[comm] = count\n\n return hyperedge_pi(hye_comm_counts, self.p)\n\n def hye_pi(\n self, hypergraph: IncidenceHypergraph, return_interactions: bool = False\n ) -> np.ndarray | tuple[np.ndarray, np.ndarray]:\n r\"\"\"Compute the hyperedge unnormalized probabilities for all the hyperedges in\n the hypergraph. For a hyperedge e, the unnormalized probability has form\n .. math::\n \\pi_e := \\sum_{i <j \\in e} p_{t_i t_j}\n with p affinity matrix and :math::`t_i` community assignment of node i.\n\n Parameters\n ----------\n hypergraph: the input hypergraph.\n return_interactions: whether to optionally return the tensor of community\n interactions within hyperedges, defined as, for any hyperedge e and\n communities a, b:\n .. math::\n \\#_{ab}^{(e)} := \\sum_{i <j \\in e} \\delta_{t_i a} \\delta_{t_j b}\n where :math::`\\delta_{xy}` is the Dirac delta, equal to 1 if :math::`x=y`,\n else 0.\n The tensor :math::`\\#` has shape (E, K, K), with E number of hyperedges and\n K number of communities.\n Returns\n -------\n The array of :math::`\\pi_e` values. 
Optionally, the tensor of :math::`\\#`\n values.\n \"\"\"\n E = hypergraph.E\n K = self.K\n p = self.p\n incidence = hypergraph.get_binary_incidence_matrix()\n\n onehot_assignments = np.zeros((self.N, K))\n onehot_assignments[np.arange(self.N), self.community_assignments()] = 1\n\n counts = incidence.transpose() @ onehot_assignments\n assert counts.shape == (E, K)\n del onehot_assignments\n\n interactions = counts.reshape(E, 1, K) * counts.reshape(E, K, 1)\n interactions[:, np.arange(K), np.arange(K)] = counts * (counts - 1) / 2\n assert interactions.shape == (E, K, K)\n del counts\n\n pi = 0.5 * (\n np.sum(interactions * p.reshape(1, K, K), axis=(1, 2))\n + np.inner(interactions[:, np.arange(K), np.arange(K)], np.diagonal(p))\n )\n\n if return_interactions:\n return pi, interactions\n return pi\n\n def free_energy(self, hypergraph: IncidenceHypergraph) -> float:\n \"\"\"Compute the free energy of a hypergraph utilizing the message passing\n cavity approximations. The free energy, often denoted as :math::`F = -log Z`,\n corresponds to the negative log-normalizing constant of the Boltzmann\n distribution. Z is also called the evidence of the probabilistic model.\n\n Parameters\n ----------\n hypergraph: hypergraph.\n\n Returns\n -------\n The log-likelihood value.\n \"\"\"\n self._check_hypergraph_vs_model_params(hypergraph)\n K = self.K\n N = self.N\n external_field = self.compute_external_field()\n ones = np.ones(hypergraph.E)\n log_marginals = self.log_marginals\n hye_dims = hypergraph.get_binary_incidence_matrix().sum(axis=0)\n\n # Node-related addends.\n f_i = [\n x.tocsc().dot(ones) - external_field[k]\n for k, x in enumerate(\n compute_psi_dynamic_programming(hypergraph=hypergraph, model=self)\n )\n ]\n assert len(f_i) == K\n assert all(x.shape == (N,) for x in f_i)\n f_i = np.vstack(f_i).T\n assert f_i.shape == (N, K)\n f_i = special.logsumexp(a=f_i, b=self.n.reshape(1, -1), axis=1)\n f_i_sum = f_i.sum()\n\n # Edge-related addends.\n # First addend.\n first_addend = compute_psi_tilde_dynamic_programming(\n hypergraph=hypergraph, model=self\n )\n first_addend = ((hye_dims - 1) * first_addend).sum()\n\n # Second addend.\n log_marginal_sum = special.logsumexp(log_marginals, axis=0)\n cross_log_marginal_sum = log_marginal_sum.reshape(\n (1, K)\n ) + log_marginal_sum.reshape((K, 1))\n assert cross_log_marginal_sum.shape == (K, K)\n\n cross_log_marginals = log_marginals.reshape((N, 1, K)) + log_marginals.reshape(\n (N, K, 1)\n )\n assert cross_log_marginals.shape == (N, K, K)\n cross_log_marginals = special.logsumexp(cross_log_marginals, axis=0)\n\n second_addend = special.logsumexp(\n a=np.hstack([cross_log_marginal_sum, cross_log_marginals]),\n b=np.hstack([self.c, -self.c]),\n )\n second_addend = np.exp(second_addend)\n second_addend *= compute_C_third(self.max_hye_size) / (2 * N)\n\n f_e_sum = first_addend + second_addend\n\n return -f_i_sum + f_e_sum\n\n @staticmethod\n def _check_params(\n n: np.ndarray, p: np.ndarray, K: int, N: int, max_hye_size: int | None\n ) -> None:\n \"\"\"Check the correctness of the initialization parameters.\"\"\"\n # Check coherence between n and p.\n if n is not None:\n if not np.allclose(n.sum(), 1):\n raise ValueError(\n \"The prior parameters n for the community distribution do not \"\n \"sum to 1.\"\n )\n if np.any(n < 0):\n raise ValueError(\n \"The prior parameters n for the community distribution contain \"\n \"negative values.\"\n )\n if len(n.shape) != 1:\n raise ValueError(\n \"The array of prior parameters n is not one-dimensional.\"\n )\n if 
n.shape != (K,):\n raise ValueError(\n \"The array of prior parameters n has dimension different from the \"\n \"number of communities K.\"\n )\n\n if p is not None:\n if not np.all(p == p.T):\n raise ValueError(\"The probability matrix p is not symmetric.\")\n\n if np.any(p > 1) or np.any(p < 0):\n raise ValueError(\n \"The probability matrix p contains values outside \"\n \"the (0, 1) interval.\"\n )\n\n if p.shape != (K, K):\n raise ValueError(\"The matrix p has shape different from (K, K).\")\n\n if p is not None and n is not None:\n if not p.shape == (K, K):\n raise ValueError(\n \"The shapes of n and p do not match. They need to be respectively \"\n \"(K,) and (K, K) for some integer K.\"\n )\n\n # Check coherence between N and max_hye_size.\n if max_hye_size is not None and max_hye_size < 2:\n raise ValueError(\"The max_hye_size cannot be lower than 2.\")\n\n if max_hye_size is not None and max_hye_size > N:\n raise ValueError(\n \"max_hye_size cannot be higher than the number of nodes N.\"\n )\n\n def _check_hypergraph_vs_model_params(\n self, hypergraph: IncidenceHypergraph\n ) -> None:\n \"\"\"Check that the model parameters are coherent with an input hypergraph.\"\"\"\n if hypergraph.N != self.N:\n raise ValueError(\n \"The input hypergraph has a different number of nodes \"\n \"than the value specified for the model.\"\n )\n\n if hypergraph.max_hye_size > self.max_hye_size:\n raise ValueError(\n \"The input hypergraph contains hyperedges bigger than the max_hye_size \"\n \"specified in the model.\"\n )\n\n def _random_init_n(self) -> None:\n \"\"\"Random initialization of the community priors n.\"\"\"\n self.n = self.rng.dirichlet(alpha=[100] * self.K)\n\n def _random_init_p(self) -> None:\n \"\"\"Random initialization of the affinity matrix p.\"\"\"\n K = self.K\n N = self.N\n\n p = np.ones((K, K)) / (10 * (K - 1))\n p += self.rng.random((K, K)) / 50\n p = np.triu(p, 1) + np.triu(p, 1).T\n np.fill_diagonal(p, 1.0 + self.rng.random(K) / 50)\n p /= N\n p = np.clip(p, a_min=1e-10, a_max=1.0)\n\n self.p = p\n\n def _init_message_passing(\n self,\n hypergraph: IncidenceHypergraph,\n dirichlet_alpha: float = 10.0,\n ) -> None:\n r\"\"\"Random initialization of the messages, marginal beliefs, and external field.\n The initialization is performed to respect the fixed-point conditions given by\n the message passing equations.\n\n Parameters\n ----------\n hypergraph: a hypergraph.\n dirichlet_alpha: parameter to initialize the messages and marginal beliefs.\n These are drawn from a Dirichlet distribution with a uniform parameter array\n :math::`(\\alpha, \\ldots, \\alpha)` with length the number of communities.\n \"\"\"\n incidence = hypergraph.get_binary_incidence_matrix()\n\n def random_prob_init():\n beliefs = [incidence.copy().astype(float) for _ in range(self.K)]\n vals = self.rng.dirichlet(\n [dirichlet_alpha] * len(beliefs), size=len(beliefs[0].data)\n )\n for i, belief in enumerate(beliefs):\n belief.data *= vals[:, i]\n\n return beliefs\n\n # Random initialization of messages from nodes to hyperedges.\n log_node_to_hye = random_prob_init()\n for belief in log_node_to_hye:\n belief.data = np.log(belief.data)\n self.log_node_to_hye = [TYPE_NODE_TO_HYE(mat) for mat in log_node_to_hye]\n\n # Random initialization of the marginal beliefs.\n marginals = self.rng.dirichlet([dirichlet_alpha] * self.K, size=self.N)\n assert marginals.shape == (self.N, self.K)\n self.log_marginals = np.log(marginals)\n\n # Compute external field from marginals.\n self.external_field = 
self.compute_external_field()\n\n # Infer hye to node as ratio of marginals and noe to hye\n log_hye_to_node = []\n for assignment in range(self.K):\n log_hye_to_node.append(\n TYPE_HYE_TO_NODE(\n incidence * self.log_marginals[:, assignment].reshape(self.N, 1)\n )\n - self.log_node_to_hye[assignment]\n )\n\n normalizer = sparse_reduce_lse(*log_hye_to_node)\n for assignment in range(self.K):\n log_hye_to_node[assignment].data -= normalizer.data\n self.log_hye_to_node = log_hye_to_node"
}
] | import itertools
import os
import numpy as np
import pytest
from pathlib import Path
from typing import Dict, Tuple
from dotenv import load_dotenv
from src.data.conversion import hye_list_to_binary_incidence
from src.data.representation.binary_hypergraph import BinaryHypergraph
from src.data.representation.incidence_hypergraph import IncidenceHypergraph
from src.model.hypergraph_block_model import HypergraphBlockModel | 10,249 |
load_dotenv()
TEST_DATA_DIR = Path(os.environ["TEST_DATA_DIR"])
########################################################################################
# Some blockmodels.
p_vals = [
np.array([[0.1, 0.2, 0.0], [0.2, 0.0, 0.9], [0.0, 0.9, 0.0]]),
np.array(
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
),
np.array(
[
[1.0, 0.0],
[0.0, 1.0],
]
),
np.array(
[
[0.9, 0.1, 0.0],
[0.1, 1.0, 0.0],
[0.0, 0.0, 0.23],
]
),
]
N_vals = [2, 5, 10, 100]
def _all_models():
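    """Yield a block model for every (p, N) combination with uniform priors."""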
for p, N in itertools.product(p_vals, N_vals):
n = np.ones(len(p)) / len(p)
n[-1] += 1 - n.sum() # sum to 1, avoid numerical errors.
assert n.sum() == 1
|
load_dotenv()
TEST_DATA_DIR = Path(os.environ["TEST_DATA_DIR"])
########################################################################################
# Some blockmodels.
p_vals = [
np.array([[0.1, 0.2, 0.0], [0.2, 0.0, 0.9], [0.0, 0.9, 0.0]]),
np.array(
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
),
np.array(
[
[1.0, 0.0],
[0.0, 1.0],
]
),
np.array(
[
[0.9, 0.1, 0.0],
[0.1, 1.0, 0.0],
[0.0, 0.0, 0.23],
]
),
]
N_vals = [2, 5, 10, 100]
def _all_models():
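    """Yield a block model for every (p, N) combination with uniform priors."""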
for p, N in itertools.product(p_vals, N_vals):
n = np.ones(len(p)) / len(p)
n[-1] += 1 - n.sum() # sum to 1, avoid numerical errors.
assert n.sum() == 1 | yield HypergraphBlockModel(n, p, N, len(p), max_hye_size=None) | 3 | 2023-12-06 22:01:38+00:00 | 12k |
kramerlab/PeerLearning | run_peer.py | [
{
"identifier": "DQNPeer",
"path": "dqn_peer.py",
"snippet": "class DQNPeer(make_peer_class(DQN)):\n \"\"\"\n A DQN version to be used with peer learning. Therefore, it features\n a critic function\n \"\"\"\n def critic(self, observations, actions):\n q_values = self.q_net(observations).reshape(len(actions), -1, 1)\n tmp = q_values[range(len(actions)), actions, :]\n return tmp, tmp # SAC critic outputs multiple values, so this need\n # to do the same\n\n def get_action(self, *args, **kwargs):\n action, _ = super().get_action(*args, **kwargs)\n return action.reshape(-1), _"
},
{
"identifier": "PeerGroup",
"path": "peer.py",
"snippet": "class PeerGroup:\n \"\"\" A group of peers who train together. \"\"\"\n def __init__(self, peers, use_agent_values=False, init_agent_values=200.,\n lr=0.95, switch_ratio=0, use_advantage=False,\n max_peer_epochs=1_000_000_000):\n \"\"\"\n :param peers: An iterable of peer agents\n :param lr: The learning rate for trust and agent values\n :param switch_ratio: switch_ratio == 0 means no switching\n :param use_advantage: use advantage instead of value for AV updates\n \"\"\"\n self.peers = peers\n self.lr = lr\n self.switch_ratio = switch_ratio\n self.active_peer = None # index of currently learning peer\n self.solo_epoch = False\n self.use_advantage = use_advantage\n self.max_peer_epochs = max_peer_epochs\n\n if use_agent_values:\n self.agent_values = np.full(len(peers), init_agent_values,\n dtype=np.float32)\n key = \"agent_values\"\n\n for peer in peers:\n peer.n_peers = len(peers)\n peer.group = self\n\n # setup agent values\n if use_agent_values:\n peer.peer_values[key] = self.agent_values # noqa (Eq. 6)\n peer.peer_value_functions[key] = self._update_agent_values\n\n def _update_agent_values(self, batch_size=10):\n \"\"\" Updates the agent values with samples from the peers' buffers\"\"\"\n targets = np.zeros_like(self.peers, dtype=np.float32)\n counts = np.zeros_like(self.peers, dtype=np.float32)\n\n for peer in self.peers:\n bs = batch_size // len(self.peers)\n # reward, action, peer, new_obs, old_obs\n if peer.buffer is not None:\n batch = peer.buffer.sample(bs)\n if batch is None: # buffer not sufficiently full\n return\n\n obs = np.array([b[3] for b in batch]).reshape(bs, -1)\n v = peer.value(obs)\n\n if self.use_advantage:\n # previous observations\n prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)\n prev_v = peer.value(prev_obs)\n else:\n prev_v = np.zeros_like(v) # no advantage (see Eq. 5)\n\n for i in range(len(batch)): # Eq. 8\n target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]\n counts[batch[i][2]] += 1\n targets[batch[i][2]] += target\n\n # ensure counts are >= 1, don't change these values\n targets[counts == 0] = self.agent_values[counts == 0]\n counts[counts == 0] = 1\n\n targets /= counts\n self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7\n\n def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):\n \"\"\" The outer peer learning routine. \"\"\"\n assert len(callbacks) == len(self.peers)\n # more solo epochs\n boost_single = 0 < self.switch_ratio < 1\n if boost_single:\n self.switch_ratio = 1 / self.switch_ratio\n\n self.solo_epoch = False\n peer_epochs = 0\n for i in range(n_epochs):\n # don't do peer learning forever\n if peer_epochs < self.max_peer_epochs:\n # ratio of 0 never performs a solo episode\n if (i % (1 + self.switch_ratio) == 1) ^ boost_single:\n self.solo_epoch = True\n else:\n peer_epochs += 1\n else: # budget spent\n self.solo_epoch = True\n\n for p, peer, callback in zip(it.count(), self.peers, callbacks):\n self.active_peer = p\n peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,\n callback=callback, tb_log_name=f\"Peer{p}\",\n reset_num_timesteps=False,\n log_interval=None, **kwargs)\n # update epoch for temperature decay\n peer.epoch += 1\n\n self.active_peer = None\n\n def __len__(self):\n return len(self.peers)"
},
{
"identifier": "make_peer_class",
"path": "peer.py",
"snippet": "def make_peer_class(cls: Type[OffPolicyAlgorithm]):\n \"\"\" Creates a mixin with the corresponding algorithm class.\n :param cls: The learning algorithm (needs to have a callable critic).\n :return: The mixed in peer agent class.\n \"\"\"\n\n class Peer(cls, ABC):\n \"\"\" Abstract Peer class\n needs to be mixed with a suitable algorithm. \"\"\"\n def __init__(self, temperature, temp_decay, algo_args, env,\n use_trust=False, use_critic=False, init_trust_values=200,\n buffer_size=1000, follow_steps=10, seed=None,\n use_trust_buffer=True, solo_training=False,\n peers_sample_with_noise=False,\n sample_random_actions=False, sample_from_suggestions=True,\n epsilon=0.0, env_args=None, only_follow_peers=False):\n if env_args is None:\n env_args = {}\n super(Peer, self).__init__(**algo_args,\n env=make_env(env, **env_args),\n seed=seed)\n # create noise matrix on the correct device\n if hasattr(self.actor, \"reset_noise\"):\n self.actor.reset_noise(self.env.num_envs)\n\n self.solo_training = solo_training\n self.init_values = dict()\n # store all peer values, e.g., trust and agent values in a dict\n self.peer_values = dict()\n # store corresponding functions as well\n self.peer_value_functions = dict()\n\n self.buffer = SuggestionBuffer(buffer_size)\n self.followed_peer = None\n self.__n_peers = None\n self.group = None\n self.epoch = 0\n\n if sample_random_actions:\n epsilon = 1.0\n\n if not solo_training:\n # all peers suggest without noise\n self.peers_sample_with_noise = peers_sample_with_noise\n # actions are sampled instead of taken greedily\n self.sample_actions = sample_from_suggestions\n self.epsilon = epsilon\n self.use_critic = use_critic\n\n if use_trust:\n self.trust_values = np.array([])\n self.init_values[\"trust\"] = init_trust_values\n self.peer_value_functions[\"trust\"] = self._update_trust\n\n self.use_buffer_for_trust = use_trust_buffer\n\n # sampling parameters\n self.temperature = temperature\n self.temp_decay = temp_decay\n\n self.follow_steps = follow_steps\n self.steps_followed = 0\n\n self.only_follow_peers = only_follow_peers\n\n @property\n def n_peers(self):\n return self.__n_peers\n\n @n_peers.setter\n def n_peers(self, n_peers):\n self.__n_peers = n_peers\n\n # Also reset the trust values\n if \"trust\" in self.init_values.keys():\n self.trust_values = np.full(self.__n_peers,\n self.init_values[\"trust\"],\n dtype=np.float32)\n self.peer_values[\"trust\"] = self.trust_values\n\n def critique(self, observations, actions) -> np.array:\n \"\"\" Evaluates the actions with the critic. \"\"\"\n with torch.no_grad():\n a = torch.as_tensor(actions, device=self.device)\n o = torch.as_tensor(observations, device=self.device)\n\n # Compute the next Q values: min over all critic targets\n q_values = torch.cat(self.critic(o, a), dim=1) # noqa\n q_values, _ = torch.min(q_values, dim=1, keepdim=True)\n return q_values.cpu().numpy()\n\n def get_action(self, obs, deterministic=False):\n \"\"\" The core function of peer learning acquires the suggested\n actions of the peers and chooses one based on the settings. 
\"\"\"\n # follow peer for defined number of steps\n followed_steps = self.steps_followed\n self.steps_followed += 1\n self.steps_followed %= self.follow_steps\n if 0 < followed_steps:\n peer = self.group.peers[self.followed_peer]\n det = (peer != self and not self.peers_sample_with_noise) or \\\n deterministic\n action, _ = peer.policy.predict(obs, deterministic=det)\n return action, None\n\n # get actions\n actions = []\n for peer in self.group.peers:\n # self always uses exploration, the suggestions of the other\n # peers only do if the critic method isn't used.\n det = (peer != self and not self.peers_sample_with_noise) or \\\n deterministic\n action, _ = peer.policy.predict(obs, deterministic=det)\n actions.append(action)\n actions = np.asarray(actions).squeeze(1)\n\n # critic (Eq. 3)\n if self.use_critic:\n observations = np.tile(obs, (self.n_peers, 1))\n q_values = self.critique(observations, actions).reshape(-1)\n self.peer_values['critic'] = q_values # part of Eq. 9\n\n # calculate peer values, e.g., trust and agent values\n values = np.zeros(self.n_peers)\n for key in self.peer_values.keys():\n # part of Eq. 9 incl. Footnote 7\n values += self.__normalize(self.peer_values[key])\n\n if self.sample_actions:\n # sample action from probability distribution (Eq. 2)\n temp = self.temperature * np.exp(-self.temp_decay * self.epoch)\n p = np.exp(values / temp)\n p /= np.sum(p)\n self.followed_peer = np.random.choice(self.n_peers, p=p)\n elif self.only_follow_peers:\n p = np.full(self.n_peers, 1 / (self.n_peers - 1))\n p[self.group.peers.index(self)] = 0\n self.followed_peer = np.random.choice(self.n_peers, p=p)\n else:\n # act (epsilon) greedily\n if np.random.random(1) >= self.epsilon:\n self.followed_peer = np.argmax(values)\n else:\n self.followed_peer = np.random.choice(self.n_peers)\n\n action = actions[self.followed_peer].reshape(1, -1)\n\n return action, None\n\n @staticmethod\n def __normalize(values):\n \"\"\" Normalize the values based on their absolute maximum. \"\"\"\n return values / np.max(np.abs(values))\n\n def value(self, observations) -> np.ndarray:\n \"\"\" Calculates the value of the observations. \"\"\"\n actions, _ = self.policy.predict(observations, False)\n return self.critique(observations, actions)\n\n def _update_trust(self, batch_size=10):\n \"\"\" Updates the trust values with samples from the buffer.\n (Eq. 5 and 8)\n \"\"\"\n if self.use_buffer_for_trust:\n batch = self.buffer.sample(batch_size)\n else:\n batch = self.buffer.latest()\n batch_size = 1\n if batch is None: # buffer not sufficiently full\n return\n\n # next observations\n obs = np.array([b[3] for b in batch]).reshape(batch_size, -1)\n v = self.value(obs)\n\n if self.group.use_advantage:\n # previous observations\n prev_obs = np.array([b[4] for b in batch]).reshape(batch_size,\n -1)\n prev_v = self.value(prev_obs)\n else:\n prev_v = np.zeros_like(v) # no comparison to own act (Eq. 5)\n\n targets = np.zeros(self.n_peers)\n counts = np.zeros(self.n_peers)\n for i in range(batch_size):\n target = (batch[i][0] + self.gamma * v[i]) - prev_v[i] # Eq. 8\n counts[batch[i][2]] += 1\n targets[batch[i][2]] += target\n\n # ensure counts are >= 1, don't change these values\n targets[counts == 0] = self.trust_values[counts == 0]\n counts[counts == 0] = 1\n\n targets /= counts\n # Eq. 4\n self.trust_values += self.group.lr * (targets - self.trust_values)\n\n def _on_step(self):\n \"\"\" Adds updates of the peer values, e.g., trust or agent\n values. 
\"\"\"\n super(Peer, self)._on_step() # noqa\n\n if not self.group.solo_epoch:\n # update values, e.g., trust and agent values after ever step\n for key in self.peer_value_functions.keys():\n self.peer_value_functions[key]()\n\n def _store_transition(self, replay_buffer, buffer_action, new_obs,\n reward, dones, infos):\n \"\"\" Adds suggestion buffer handling. \"\"\"\n\n # get previous observations\n old_obs = self._last_obs\n\n super(Peer, self)._store_transition(replay_buffer, # noqa\n buffer_action, new_obs,\n reward, dones, infos)\n\n if not self.group.solo_epoch:\n # store transition in suggestion buffer as well\n self.buffer.add(reward, buffer_action, self.followed_peer,\n new_obs, old_obs)\n\n def _predict_train(self, observation, state=None,\n episode_start=None, deterministic=False):\n \"\"\" The action selection during training involves the peers. \"\"\"\n if deterministic:\n return self.policy.predict(observation, state=state,\n episode_start=episode_start,\n deterministic=deterministic)\n else:\n return self.get_action(observation)\n\n def learn(self, solo_episode=False, **kwargs):\n \"\"\" Adds action selection with help of peers. \"\"\"\n predict = self.predict # safe for later\n\n # use peer suggestions only when wanted\n if not (self.solo_training or solo_episode):\n self.predict = self._predict_train\n else:\n self.followed_peer = self.group.peers.index(self)\n\n result = super(Peer, self).learn(**kwargs)\n\n self.predict = predict # noqa\n return result\n\n def _excluded_save_params(self):\n \"\"\" Excludes attributes that are functions. Otherwise, the save\n method fails. \"\"\"\n ex_list = super(Peer, self)._excluded_save_params()\n ex_list.extend([\"peer_value_functions\", \"peer_values\",\n \"group\", \"predict\"])\n return ex_list\n\n return Peer"
},
{
"identifier": "PeerEvalCallback",
"path": "callbacks.py",
"snippet": "class PeerEvalCallback(EvalCallback):\n \"\"\"\n Callback to track collective measurements about peers.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use\n ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param peer_group: The group of peers\n :param eval_env: The environment used for initialization\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the\n callback.\n :param log_path: Path to a folder where the evaluations\n (``evaluations.npz``) will be saved. It will be updated at each\n evaluation.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose:\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has\n not been wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n peer_group: PeerGroup,\n eval_envs: List[Union[gym.Env, VecEnv]],\n n_samples=100,\n **kwargs\n ):\n self.peer_group = peer_group\n self.eval_envs = eval_envs\n self.n_samples = n_samples\n\n self.last_logged_matrix = None\n self.follow_matrix = np.zeros((len(peer_group), len(peer_group)))\n\n self.start_time = time.time()\n\n super().__init__(**kwargs)\n\n def _on_step(self) -> bool:\n self.accumulate_followed_peers() # needs to be done at every step\n\n # log time for debugging etc.\n self.logger.record(\"time/time_elapsed\",\n time.time() - self.start_time,\n exclude=\"tensorboard\")\n\n super()._on_step()\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n if 'agent_values' in self.peer_group.__dict__:\n self.track_agent_values()\n if 'trust_values' in self.peer_group.peers[0].__dict__:\n self.track_trust_values()\n self.track_followed_agent(self.peer_group.active_peer)\n\n peer = self.peer_group.active_peer\n eval_values = {\n f\"Peer{peer}_0/eval/mean_reward\": self.last_mean_reward,\n }\n if peer == len(self.peer_group) - 1:\n eval_values[\"global_step\"] = self.n_calls\n wandb.log(eval_values, commit=True)\n else:\n wandb.log(eval_values, commit=False)\n return True\n\n def track_agent_values(self):\n n_agents = len(self.peer_group.peers)\n for i in range(n_agents):\n agent_value = self.peer_group.agent_values[i]\n wandb.log({'Peer{}_0/eval/agent_value'.format(i): agent_value},\n commit=False)\n return True\n\n def track_trust_values(self):\n peer = self.peer_group.active_peer\n trust_i = self.peer_group.peers[peer].trust_values\n for j, el in np.ndenumerate(trust_i):\n wandb.log({'Peer{}_0/eval/trust_{}'.format(peer, j[0]): el},\n commit=False)\n return True\n\n def accumulate_followed_peers(self):\n peer = self.peer_group.active_peer\n followed_peer = self.peer_group.peers[peer].followed_peer\n if followed_peer is not None:\n self.follow_matrix[peer, followed_peer] += 1\n\n def track_followed_agent(self, active_peer):\n if self.last_logged_matrix is None:\n diff = self.follow_matrix\n else:\n diff = self.follow_matrix - self.last_logged_matrix\n\n for (followed_peer,), count in np.ndenumerate(\n self.follow_matrix[active_peer]):\n wandb.log({'Peer{}_0/eval/follow_count{}'.format(\n active_peer, followed_peer): count}, commit=False)\n # also log difference\n wandb.log({'Peer{}_0/eval/follow_count_{}diff'.format(\n active_peer, followed_peer): diff[active_peer, followed_peer]},\n commit=False)\n self.last_logged_matrix = 
np.copy(self.follow_matrix)\n\n def commit_global_step(self, timesteps):\n if self.peer_group.active_peer == len(self.peer_group) - 1:\n eval_values = {\"global_step\": self.n_calls + self.eval_freq}\n wandb.log(eval_values, commit=True)\n\n self.n_calls += timesteps"
},
{
"identifier": "str2bool",
"path": "utils.py",
"snippet": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
},
{
"identifier": "add_default_values_to_parser",
"path": "utils.py",
"snippet": "def add_default_values_to_parser(parser):\n parser.add_argument(\"--job_id\", type=str,\n default=wandb.util.generate_id())\n parser.add_argument(\"--agent-count\", type=int, help=\"Number of agents.\",\n default=4)\n parser.add_argument(\"--device\", type=str, default=\"auto\",\n choices=[\"cpu\", \"cuda\", \"auto\"],\n help=\"Device to use, either 'cpu', 'cuda' for GPU or \"\n \"'auto'.\")\n parser.add_argument(\"--env\", type=str, default=\"HalfCheetahBulletEnv-v0\",\n help=\"OpenAI Gym environment to perform algorithm on.\")\n parser.add_argument(\"--env_args\", action=StoreDictKeyPair,\n nargs='*', metavar=\"KEY=VAL\", default={})\n parser.add_argument(\"--seed\", type=int, default=1,\n help=\"Random seed in [0, 2 ** 32)\")\n parser.add_argument(\"--wandb\", type=str, default='offline',\n choices=[\"online\", \"offline\", \"disabled\"])\n parser.add_argument(\"--discrete-actions\", type=str2bool, nargs=\"?\",\n const=False, default=False)\n parser.add_argument(\"--save-dir\", type=Path,\n default=Path.cwd().joinpath(\"Experiments\"))\n\n # Agents\n agent_parser = parser.add_argument_group(\"Agent\")\n agent_parser.add_argument(\"--mix-agents\", type=str, nargs='*',\n default=[\"SAC\"])\n\n agent_parser.add_argument(\"--net-arch\", type=int, nargs='*',\n action='append')\n agent_parser.add_argument(\"--load_paths\", type=str, nargs='*',\n default=[])\n agent_parser.add_argument(\"--agents_to_store\", type=int, nargs='*',\n default=[])\n\n return parser"
},
{
"identifier": "log_reward_avg_in_wandb",
"path": "utils.py",
"snippet": "def log_reward_avg_in_wandb(callbacks):\n results = []\n for callback in callbacks:\n eval_callback = callback[-1]\n result = eval_callback.evaluations_results\n results.append(np.mean(result))\n wandb.log({'reward_avg': np.mean(results)})"
},
{
"identifier": "add_default_values_to_train_parser",
"path": "utils.py",
"snippet": "def add_default_values_to_train_parser(training_parser):\n training_parser.add_argument(\"--steps\", type=int, default=3_000_000,\n help=\"Total number of time steps to train \"\n \"the agent.\")\n training_parser.add_argument(\"--eval-interval\", type=int,\n default=10_000,\n help=\"Interval in time steps between \"\n \"evaluations.\")\n training_parser.add_argument(\"--n-eval-episodes\", type=int,\n default=10,\n help=\"Number of episodes for each \"\n \"evaluation.\")\n training_parser.add_argument(\"--buffer-size\", type=int,\n default=1_000_000)\n training_parser.add_argument(\"--buffer-start-size\", type=int,\n default=1_000,\n help=\"Minimum replay buffer size before \"\n \"performing gradient updates.\")\n training_parser.add_argument(\"--batch-size\", type=int,\n default=100,\n help=\"Minibatch size\")\n training_parser.add_argument(\"--min-epoch-length\", type=int,\n default=10_000,\n help=\"Minimal length of a training_parser \"\n \"epoch.\")\n training_parser.add_argument(\"--learning_rate\", type=str2func, nargs='*',\n default=[3e-4],\n help='Learning rate for adam optimizer, '\n 'the same learning rate will be used '\n 'for all networks (Q-Values, Actor and '\n 'Value function) it can be a function'\n ' of the current progress remaining '\n '(from 1 to 0)')\n training_parser.add_argument(\"--tau\", type=float, default=0.005)\n training_parser.add_argument(\"--gamma\", type=float, default=0.99)\n training_parser.add_argument(\"--gradient_steps\", type=int,\n default=1)\n training_parser.add_argument(\"--train_freq\", type=int,\n default=1)\n training_parser.add_argument(\"--target_update_interval\", type=int,\n default=1)\n dqn_parser = training_parser.add_argument_group(\"DQN\")\n dqn_parser.add_argument(\"--exploration-fraction\", type=float, default=0.1)\n dqn_parser.add_argument(\"--exploration-final-eps\", type=float,\n default=0.05)\n return training_parser"
},
{
"identifier": "new_random_seed",
"path": "utils.py",
"snippet": "def new_random_seed():\n return np.random.randint(np.iinfo(np.int32).max)"
},
{
"identifier": "make_env",
"path": "utils.py",
"snippet": "def make_env(env_str, n_envs=1, **env_args):\n envs = []\n for _ in range(n_envs):\n def env_func():\n env = Monitor(gym.make(env_str, **env_args))\n env.seed(new_random_seed())\n return env\n\n envs.append(env_func)\n return DummyVecEnv(envs)"
},
{
"identifier": "ControllerArguments",
"path": "utils.py",
"snippet": "class ControllerArguments:\n def __init__(self, number_agents):\n self.number_agents = number_agents\n\n def argument_for_every_agent(self, arguments, i):\n if type(arguments) is list:\n if len(arguments) == 1:\n return arguments[0]\n elif len(arguments) == self.number_agents:\n return arguments[i]\n else:\n raise AssertionError(f'number of arguments ({len(arguments)}) '\n f'has to be 1 or == number of agents '\n f'({self.number_agents}) input is'\n f' {arguments}')\n else:\n raise AssertionError(f'input is not a list input is{arguments} '\n f'{type(arguments)}')"
}
] | import argparse
import datetime
import gym
import wandb
import predefined_agents # noqa: F401
import env as local_envs # noqa: F401
from pathlib import Path
from stable_baselines3 import SAC, TD3
from stable_baselines3.common.utils import set_random_seed, \
update_learning_rate
from wandb.integration.sb3 import WandbCallback
from dqn_peer import DQNPeer
from peer import PeerGroup, make_peer_class
from callbacks import PeerEvalCallback
from utils import str2bool, add_default_values_to_parser, \
log_reward_avg_in_wandb, add_default_values_to_train_parser, \
new_random_seed, make_env, ControllerArguments | 8,484 | peer_args = []
for i in range(args.agent_count):
algo_args.append(
dict(policy="MlpPolicy",
verbose=1,
policy_kwargs=dict(
net_arch=CA.argument_for_every_agent(args.net_arch, i)
),
buffer_size=args.buffer_size,
batch_size=args.batch_size,
gamma=args.gamma,
tau=args.tau,
train_freq=args.train_freq,
target_update_interval=args.target_update_interval,
gradient_steps=args.gradient_steps,
learning_starts=args.buffer_start_size,
learning_rate=CA.argument_for_every_agent(args.learning_rate,
i),
tensorboard_log=None,
device=args.device))
peer_args.append(
dict(temperature=CA.argument_for_every_agent(args.T, i),
temp_decay=CA.argument_for_every_agent(args.T_decay, i),
algo_args=algo_args[i],
env=args.env,
env_args=args.env_args,
use_trust=args.use_trust,
use_critic=args.use_critic,
buffer_size=args.trust_buffer_size,
follow_steps=args.follow_steps,
use_trust_buffer=args.use_trust_buffer,
solo_training=not args.peer_learning,
peers_sample_with_noise=args.peers_sample_with_noise,
sample_random_actions=args.sample_random_actions,
init_trust_values=args.init_trust_values,
sample_from_suggestions=args.sample_from_suggestions,
epsilon=args.epsilon,
only_follow_peers=args.only_follow_peers))
# create Peer classes
SACPeer = make_peer_class(SAC)
TD3Peer = make_peer_class(TD3)
# create peers and peer group
peers = []
callbacks = []
eval_envs = []
for i in range(args.agent_count):
args_for_agent = peer_args[i]
agent_algo = CA.argument_for_every_agent(args.mix_agents, i)
if agent_algo == 'SAC':
args_for_agent["algo_args"]["ent_coef"] = "auto"
args_for_agent["algo_args"]["use_sde"] = True
args_for_agent["algo_args"]["policy_kwargs"]["log_std_init"] = -3
peer = SACPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'TD3':
peer = TD3Peer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'DQN':
args_for_agent["algo_args"]["exploration_fraction"] = \
args.exploration_fraction
args_for_agent["algo_args"]["exploration_final_eps"] = \
args.exploration_final_eps
peer = DQNPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo in ['Adversarial', 'Expert']:
class_str = f"predefined_agents." \
f"{args.env.split('-')[0]}{agent_algo}"
peer = eval(class_str)(**args_for_agent, seed=new_random_seed())
else:
raise NotImplementedError(
f"The Agent {agent_algo}"
f" is not implemented")
peers.append(peer)
eval_env = make_env(args.env, args.n_eval_episodes, **args.env_args)
# every agent gets its own callbacks
callbacks.append([WandbCallback(verbose=2)])
eval_envs.append(eval_env)
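# combine all agents into a peer group that shares the agent values, the
# trust/agent-value learning rate, and the solo/peer epoch switching schedule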
peer_group = PeerGroup(peers, use_agent_values=args.use_agent_value,
lr=args.trust_lr, switch_ratio=args.switch_ratio,
init_agent_values=args.init_agent_values,
use_advantage=args.use_advantage,
max_peer_epochs=args.max_peer_epochs)
# create callbacks
for i in range(args.agent_count):
peer_callback = PeerEvalCallback(eval_env=eval_envs[i],
eval_envs=eval_envs,
peer_group=peer_group,
best_model_save_path=str_folder,
log_path=str_folder,
eval_freq=args.eval_interval,
n_eval_episodes=args.n_eval_episodes)
callbacks[i].append(peer_callback) # type: ignore
# calculate number of epochs based on episode length
max_episode_steps = max(args.min_epoch_length,
gym.spec(args.env).max_episode_steps)
n_epochs = args.steps // max_episode_steps
# load pretrained model
for i, path in enumerate(args.load_paths):
load_path = Path.cwd().joinpath("Experiments", path)
peer = peer_group.peers[i].set_parameters(load_path_or_dict=load_path)
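    # freeze the loaded agent: zero all of its learning rates and clear its
    # replay and suggestion buffers so it keeps its pretrained behavior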
peers[i].learning_rate = 0
peers[i].lr_schedule = lambda _: 0.0
update_learning_rate(peers[i].ent_coef_optimizer, 0)
peers[i].replay_buffer.reset()
peers[i].buffer.buffer.clear()
# train the peer group
peer_group.learn(n_epochs, callbacks=callbacks,
eval_log_path=str_folder,
max_epoch_len=max_episode_steps)
|
def add_args():
# create arg parser
parser = argparse.ArgumentParser(description="Peer learning.")
# General
parser.add_argument("--save-name", type=str, default="delete_me")
parser = add_default_values_to_parser(parser)
# Training
training = parser.add_argument_group("Training")
add_default_values_to_train_parser(training)
# Peer Learning
peer_learning = parser.add_argument_group("Peer Learning")
peer_learning.add_argument("--follow-steps", type=int, default=10)
peer_learning.add_argument("--switch-ratio", type=float, default=1,
help="How many times peer training compared to "
"solo training Ratio of peer learning "
"episodes to solo episodes; 0 -> only "
"peer learning episodes."
"ratio 0 {'solo': 0, 'peer': 100}"
"ratio 0.2 {'solo': 83, 'peer': 17}"
"ratio 0.25 {'solo': 80, 'peer': 20}"
"ratio 0.333333 {'solo': 75, 'peer': 25}"
"ratio 0.5 {'solo': 67, 'peer': 33}"
"ratio 1 {'solo': 50, 'peer': 50}"
"ratio 2 {'solo': 33, 'peer': 67}"
"ratio 3 {'solo': 25, 'peer': 75}"
"ratio 4 {'solo': 20, 'peer': 80}"
"ratio 5 {'solo': 17, 'peer': 83}")
peer_learning.add_argument("--peer-learning", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--peers-sample-with-noise", type=str2bool,
nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-agent-value", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-trust", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-trust-buffer", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--trust-buffer-size", type=int, default=1000)
peer_learning.add_argument("--use-critic", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--sample_random_actions", type=str2bool,
nargs="?", const=True, default=False)
peer_learning.add_argument("--trust-lr", type=float, default=0.001)
peer_learning.add_argument("--T", type=float, nargs='*', default=[1])
peer_learning.add_argument("--T-decay", type=float, nargs='*', default=[0])
peer_learning.add_argument("--init-trust-values", type=float, default=200)
peer_learning.add_argument("--init-agent-values", type=float, default=200)
peer_learning.add_argument("--use-advantage", type=str2bool, nargs="?",
const=False, default=False)
peer_learning.add_argument("--sample-from-suggestions", type=str2bool,
nargs="?", const=False, default=False)
peer_learning.add_argument("--epsilon", type=float, default=0.0)
peer_learning.add_argument("--max-peer-epochs", type=int,
default=1_000_000_000)
peer_learning.add_argument("--only-follow-peers", type=str2bool,
nargs="?", const=False, default=False)
return parser
if __name__ == '__main__':
# parse args
arg_parser = add_args()
args = arg_parser.parse_args()
CA = ControllerArguments(args.agent_count)
    # assert that peer learning is enabled whenever a peer learning strategy is chosen
option_on = (args.use_trust or args.use_critic or args.use_agent_value)
assert (option_on and args.peer_learning) or not option_on
# create results/experiments folder
time_string = datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
unique_dir = f"{time_string}__{args.job_id}"
experiment_folder = args.save_dir.joinpath(args.save_name, unique_dir)
experiment_folder.mkdir(exist_ok=True, parents=True)
str_folder = str(experiment_folder)
print("Experiment folder is", str_folder)
# suppress gym warnings
gym.logger.set_level(level=gym.logger.DISABLED)
# seed everything
set_random_seed(args.seed)
# init wandb
wandb.tensorboard.patch(root_logdir=str_folder)
run = wandb.init(entity="jgu-wandb", config=args.__dict__,
project="peer-learning",
monitor_gym=True, sync_tensorboard=False,
name=f"{args.save_name}__{args.job_id}",
notes=f"Peer Learning with {args.agent_count} agents on "
f"the {args.env.split('-')[0]} environment.",
dir=str_folder, mode=args.wandb)
# initialize peer group
algo_args = []
peer_args = []
for i in range(args.agent_count):
algo_args.append(
dict(policy="MlpPolicy",
verbose=1,
policy_kwargs=dict(
net_arch=CA.argument_for_every_agent(args.net_arch, i)
),
buffer_size=args.buffer_size,
batch_size=args.batch_size,
gamma=args.gamma,
tau=args.tau,
train_freq=args.train_freq,
target_update_interval=args.target_update_interval,
gradient_steps=args.gradient_steps,
learning_starts=args.buffer_start_size,
learning_rate=CA.argument_for_every_agent(args.learning_rate,
i),
tensorboard_log=None,
device=args.device))
peer_args.append(
dict(temperature=CA.argument_for_every_agent(args.T, i),
temp_decay=CA.argument_for_every_agent(args.T_decay, i),
algo_args=algo_args[i],
env=args.env,
env_args=args.env_args,
use_trust=args.use_trust,
use_critic=args.use_critic,
buffer_size=args.trust_buffer_size,
follow_steps=args.follow_steps,
use_trust_buffer=args.use_trust_buffer,
solo_training=not args.peer_learning,
peers_sample_with_noise=args.peers_sample_with_noise,
sample_random_actions=args.sample_random_actions,
init_trust_values=args.init_trust_values,
sample_from_suggestions=args.sample_from_suggestions,
epsilon=args.epsilon,
only_follow_peers=args.only_follow_peers))
# create Peer classes
SACPeer = make_peer_class(SAC)
TD3Peer = make_peer_class(TD3)
# create peers and peer group
peers = []
callbacks = []
eval_envs = []
for i in range(args.agent_count):
args_for_agent = peer_args[i]
agent_algo = CA.argument_for_every_agent(args.mix_agents, i)
if agent_algo == 'SAC':
args_for_agent["algo_args"]["ent_coef"] = "auto"
args_for_agent["algo_args"]["use_sde"] = True
args_for_agent["algo_args"]["policy_kwargs"]["log_std_init"] = -3
peer = SACPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'TD3':
peer = TD3Peer(**args_for_agent, seed=new_random_seed())
elif agent_algo == 'DQN':
args_for_agent["algo_args"]["exploration_fraction"] = \
args.exploration_fraction
args_for_agent["algo_args"]["exploration_final_eps"] = \
args.exploration_final_eps
peer = DQNPeer(**args_for_agent, seed=new_random_seed())
elif agent_algo in ['Adversarial', 'Expert']:
class_str = f"predefined_agents." \
f"{args.env.split('-')[0]}{agent_algo}"
peer = eval(class_str)(**args_for_agent, seed=new_random_seed())
else:
raise NotImplementedError(
f"The Agent {agent_algo}"
f" is not implemented")
peers.append(peer)
eval_env = make_env(args.env, args.n_eval_episodes, **args.env_args)
# every agent gets its own callbacks
callbacks.append([WandbCallback(verbose=2)])
eval_envs.append(eval_env)
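    # combine all agents into a peer group that shares the agent values, the
    # trust/agent-value learning rate, and the solo/peer epoch switching schedule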
peer_group = PeerGroup(peers, use_agent_values=args.use_agent_value,
lr=args.trust_lr, switch_ratio=args.switch_ratio,
init_agent_values=args.init_agent_values,
use_advantage=args.use_advantage,
max_peer_epochs=args.max_peer_epochs)
# create callbacks
for i in range(args.agent_count):
peer_callback = PeerEvalCallback(eval_env=eval_envs[i],
eval_envs=eval_envs,
peer_group=peer_group,
best_model_save_path=str_folder,
log_path=str_folder,
eval_freq=args.eval_interval,
n_eval_episodes=args.n_eval_episodes)
callbacks[i].append(peer_callback) # type: ignore
# calculate number of epochs based on episode length
max_episode_steps = max(args.min_epoch_length,
gym.spec(args.env).max_episode_steps)
n_epochs = args.steps // max_episode_steps
# load pretrained model
for i, path in enumerate(args.load_paths):
load_path = Path.cwd().joinpath("Experiments", path)
peer = peer_group.peers[i].set_parameters(load_path_or_dict=load_path)
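        # freeze the loaded agent: zero all of its learning rates and clear its
        # replay and suggestion buffers so it keeps its pretrained behavior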
peers[i].learning_rate = 0
peers[i].lr_schedule = lambda _: 0.0
update_learning_rate(peers[i].ent_coef_optimizer, 0)
peers[i].replay_buffer.reset()
peers[i].buffer.buffer.clear()
# train the peer group
peer_group.learn(n_epochs, callbacks=callbacks,
eval_log_path=str_folder,
max_epoch_len=max_episode_steps)
| log_reward_avg_in_wandb(callbacks) | 6 | 2023-12-13 10:40:55+00:00 | 12k |
ZS-YANG/FemtoDet-v3 | projects/Detic_new/detic/detic.py | [
{
"identifier": "LVISV1Dataset",
"path": "mmdet/datasets/lvis.py",
"snippet": "class LVISV1Dataset(LVISDataset):\n \"\"\"LVIS v1 dataset for detection.\"\"\"\n\n METAINFO = {\n 'classes':\n ('aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',\n 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',\n 'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium',\n 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',\n 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',\n 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',\n 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',\n 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',\n 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',\n 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',\n 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',\n 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',\n 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',\n 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',\n 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',\n 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',\n 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',\n 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',\n 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',\n 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',\n 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',\n 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',\n 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',\n 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',\n 'bottle_opener', 'bouquet', 'bow_(weapon)',\n 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl',\n 'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders',\n 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread',\n 'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach',\n 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket',\n 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train',\n 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed',\n 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter',\n 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet',\n 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder',\n 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can',\n 'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane',\n 'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen',\n 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',\n 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',\n 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',\n 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',\n 'cash_register', 'casserole', 'cassette', 'cast', 'cat',\n 'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery',\n 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',\n 'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard',\n 'cherry', 'chessboard', 'chicken_(animal)', 'chickpea',\n 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',\n 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',\n 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',\n 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',\n 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',\n 
'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard',\n 'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower',\n 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',\n 'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)',\n 'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil',\n 'coin', 'colander', 'coleslaw', 'coloring_material',\n 'combination_lock', 'pacifier', 'comic_book', 'compass',\n 'computer_keyboard', 'condiment', 'cone', 'control',\n 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',\n 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',\n 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',\n 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',\n 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',\n 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',\n 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',\n 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',\n 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',\n 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',\n 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',\n 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',\n 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table',\n 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',\n 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',\n 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',\n 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove',\n 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat',\n 'dress_suit', 'dresser', 'drill', 'drone', 'dropper',\n 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',\n 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle',\n 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg',\n 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair',\n 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot',\n 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret',\n 'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine',\n 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine',\n 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug',\n 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod',\n 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash',\n 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',\n 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',\n 'food_processor', 'football_(American)', 'football_helmet',\n 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',\n 'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge',\n 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose',\n 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin',\n 'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger',\n 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove',\n 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart',\n 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater',\n 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',\n 'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun',\n 'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger',\n 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass',\n 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 
'handle',\n 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil',\n 'headband', 'headboard', 'headlight', 'headscarf', 'headset',\n 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',\n 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',\n 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',\n 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',\n 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',\n 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',\n 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',\n 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',\n 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',\n 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',\n 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',\n 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',\n 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',\n 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',\n 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',\n 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade',\n 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',\n 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',\n 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',\n 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',\n 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange',\n 'manger', 'manhole', 'map', 'marker', 'martini', 'mascot',\n 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)',\n 'matchbox', 'mattress', 'measuring_cup', 'measuring_stick',\n 'meatball', 'medicine', 'melon', 'microphone', 'microscope',\n 'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake',\n 'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)',\n 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey',\n 'motor', 'motor_scooter', 'motor_vehicle', 'motorcycle',\n 'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad',\n 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument',\n 'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle',\n 'nest', 'newspaper', 'newsstand', 'nightshirt',\n 'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook',\n 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',\n 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',\n 'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven',\n 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',\n 'padlock', 'paintbrush', 'painting', 'pajamas', 'palette',\n 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',\n 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',\n 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',\n 'parasol', 'parchment', 'parka', 'parking_meter', 'parrot',\n 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',\n 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',\n 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',\n 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',\n 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',\n 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',\n 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',\n 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',\n 
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',\n 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',\n 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',\n 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',\n 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',\n 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',\n 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot',\n 'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn',\n 'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller',\n 'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin',\n 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt',\n 'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver',\n 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry',\n 'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',\n 'recliner', 'record_player', 'reflector', 'remote_control',\n 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',\n 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',\n 'rolling_pin', 'root_beer', 'router_(computer_equipment)',\n 'rubber_band', 'runner_(carpet)', 'plastic_bag',\n 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',\n 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',\n 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',\n 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',\n 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',\n 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',\n 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',\n 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',\n 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',\n 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',\n 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',\n 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',\n 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',\n 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',\n 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',\n 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',\n 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',\n 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',\n 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',\n 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',\n 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',\n 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',\n 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',\n 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew',\n 'stirrer', 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove',\n 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',\n 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',\n 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',\n 'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants',\n 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit',\n 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',\n 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',\n 'tambourine', 'army_tank', 'tank_(storage_vessel)',\n 'tank_top_(clothing)', 
'tape_(sticky_cloth_or_paper)', 'tape_measure',\n 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',\n 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',\n 'telephone_pole', 'telephoto_lens', 'television_camera',\n 'television_set', 'tennis_ball', 'tennis_racket', 'tequila',\n 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',\n 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer',\n 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster',\n 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs',\n 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',\n 'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',\n 'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',\n 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',\n 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod',\n 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban',\n 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',\n 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',\n 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',\n 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',\n 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',\n 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',\n 'washbasin', 'automatic_washer', 'watch', 'water_bottle',\n 'water_cooler', 'water_faucet', 'water_heater', 'water_jug',\n 'water_gun', 'water_scooter', 'water_ski', 'water_tower',\n 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',\n 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',\n 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',\n 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',\n 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',\n 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',\n 'yoke_(animal_equipment)', 'zebra', 'zucchini'),\n 'palette':\n None\n }\n\n def load_data_list(self) -> List[dict]:\n \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n Returns:\n List[dict]: A list of annotation.\n \"\"\" # noqa: E501\n try:\n import lvis\n if getattr(lvis, '__version__', '0') >= '10.5.3':\n warnings.warn(\n 'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"', # noqa: E501\n UserWarning)\n from lvis import LVIS\n except ImportError:\n raise ImportError(\n 'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".' # noqa: E501\n )\n with get_local_path(\n self.ann_file, backend_args=self.backend_args) as local_path:\n self.lvis = LVIS(local_path)\n self.cat_ids = self.lvis.get_cat_ids()\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n img_ids = self.lvis.get_img_ids()\n data_list = []\n total_ann_ids = []\n for img_id in img_ids:\n raw_img_info = self.lvis.load_imgs([img_id])[0]\n raw_img_info['img_id'] = img_id\n # coco_url is used in LVISv1 instead of file_name\n # e.g. 
http://images.cocodataset.org/train2017/000000391895.jpg\n # train/val split in specified in url\n raw_img_info['file_name'] = raw_img_info['coco_url'].replace(\n 'http://images.cocodataset.org/', '')\n ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n raw_ann_info = self.lvis.load_anns(ann_ids)\n total_ann_ids.extend(ann_ids)\n parsed_data_info = self.parse_data_info({\n 'raw_ann_info':\n raw_ann_info,\n 'raw_img_info':\n raw_img_info\n })\n data_list.append(parsed_data_info)\n if self.ANN_ID_UNIQUE:\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n del self.lvis\n\n return data_list"
},
{
"identifier": "CascadeRCNN",
"path": "mmdet/models/detectors/cascade_rcnn.py",
"snippet": "class CascadeRCNN(TwoStageDetector):\n r\"\"\"Implementation of `Cascade R-CNN: Delving into High Quality Object\n Detection <https://arxiv.org/abs/1906.09756>`_\"\"\"\n\n def __init__(self,\n backbone: ConfigType,\n neck: OptConfigType = None,\n rpn_head: OptConfigType = None,\n roi_head: OptConfigType = None,\n train_cfg: OptConfigType = None,\n test_cfg: OptConfigType = None,\n data_preprocessor: OptConfigType = None,\n init_cfg: OptMultiConfig = None) -> None:\n super().__init__(\n backbone=backbone,\n neck=neck,\n rpn_head=rpn_head,\n roi_head=roi_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n data_preprocessor=data_preprocessor,\n init_cfg=init_cfg)"
},
{
"identifier": "MODELS",
"path": "mmdet/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])"
},
{
"identifier": "SampleList",
"path": "mmdet/structures/det_data_sample.py",
"snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):"
}
] | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import clip
from typing import List, Union
from mmengine.logging import print_log
from torch import Tensor
from mmdet.datasets import LVISV1Dataset
from mmdet.models.detectors.cascade_rcnn import CascadeRCNN
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from clip.simple_tokenizer import SimpleTokenizer
from mmdet.datasets import CocoDataset
from mmdet.datasets import CityscapesDataset
from mmdet.datasets import VOCDataset
from mmdet.datasets import OpenImagesDataset
from mmdet.datasets import LVISV1Dataset | 8,255 | # Copyright (c) OpenMMLab. All rights reserved.
class CLIPTextEncoder(nn.Module):
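    # Thin wrapper around the CLIP text encoder: tokenizes raw text and returns the CLIP text embeddings.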
def __init__(self, model_name='ViT-B/32'):
super().__init__()
self.tokenizer = SimpleTokenizer()
pretrained_model, _ = clip.load(model_name, device='cpu')
self.clip = pretrained_model
@property
def device(self):
return self.clip.device
@property
def dtype(self):
return self.clip.dtype
def tokenize(self,
texts: Union[str, List[str]],
context_length: int = 77) -> torch.LongTensor:
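        # Wrap each text with CLIP's <|startoftext|>/<|endoftext|> tokens and zero-pad to context_length;
        # over-length token sequences are randomly cropped to fit.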
if isinstance(texts, str):
texts = [texts]
sot_token = self.tokenizer.encoder['<|startoftext|>']
eot_token = self.tokenizer.encoder['<|endoftext|>']
all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token]
for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
st = torch.randint(len(tokens) - context_length + 1,
(1, ))[0].item()
tokens = tokens[st:st + context_length]
result[i, :len(tokens)] = torch.tensor(tokens)
return result
def forward(self, text):
text = self.tokenize(text)
text_features = self.clip.encode_text(text)
return text_features
def get_class_weight(original_caption, prompt_prefix='a '):
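    # Resolve the list of class names (from a dataset keyword, a ' . '-separated caption, or an explicit list)
    # and embed "<prompt_prefix><name>" prompts with CLIPTextEncoder; returns (class_names, D x C embedding matrix).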
if isinstance(original_caption, str):
if original_caption == 'coco':
class_names = CocoDataset.METAINFO['classes']
elif original_caption == 'cityscapes':
class_names = CityscapesDataset.METAINFO['classes']
elif original_caption == 'voc':
class_names = VOCDataset.METAINFO['classes']
elif original_caption == 'openimages':
class_names = OpenImagesDataset.METAINFO['classes']
elif original_caption == 'lvis':
class_names = LVISV1Dataset.METAINFO['classes']
else:
if not original_caption.endswith('.'):
original_caption = original_caption + ' . '
original_caption = original_caption.split(' . ')
class_names = list(filter(lambda x: len(x) > 0, original_caption))
    # for test.py: original_caption is already a list of class names
else:
class_names = list(original_caption)
text_encoder = CLIPTextEncoder()
text_encoder.eval()
texts = [prompt_prefix + x for x in class_names]
print_log(f'Computing text embeddings for {len(class_names)} classes.')
embeddings = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
return class_names, embeddings
def reset_cls_layer_weight(roi_head, weight):
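    # Install zero-shot classifier weights: load them from a .npy path (or take a tensor directly),
    # append a background column, L2-normalize each column, and assign the result to fc_cls.zs_weight
    # of every cascade bbox head.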
if type(weight) == str:
print_log(f'Resetting cls_layer_weight from file: {weight}')
zs_weight = torch.tensor(
np.load(weight),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
else:
zs_weight = weight
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight.shape[0], 1))], dim=1) # D x (C + 1)
zs_weight = F.normalize(zs_weight, p=2, dim=0)
zs_weight = zs_weight.to('cuda')
num_classes = zs_weight.shape[-1]
for bbox_head in roi_head.bbox_head:
bbox_head.num_classes = num_classes
del bbox_head.fc_cls.zs_weight
bbox_head.fc_cls.zs_weight = zs_weight
@MODELS.register_module()
| # Copyright (c) OpenMMLab. All rights reserved.
class CLIPTextEncoder(nn.Module):
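    # Thin wrapper around the CLIP text encoder: tokenizes raw text and returns the CLIP text embeddings.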
def __init__(self, model_name='ViT-B/32'):
super().__init__()
self.tokenizer = SimpleTokenizer()
pretrained_model, _ = clip.load(model_name, device='cpu')
self.clip = pretrained_model
@property
def device(self):
return self.clip.device
@property
def dtype(self):
return self.clip.dtype
def tokenize(self,
texts: Union[str, List[str]],
context_length: int = 77) -> torch.LongTensor:
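        # Wrap each text with CLIP's <|startoftext|>/<|endoftext|> tokens and zero-pad to context_length;
        # over-length token sequences are randomly cropped to fit.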
if isinstance(texts, str):
texts = [texts]
sot_token = self.tokenizer.encoder['<|startoftext|>']
eot_token = self.tokenizer.encoder['<|endoftext|>']
all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token]
for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
st = torch.randint(len(tokens) - context_length + 1,
(1, ))[0].item()
tokens = tokens[st:st + context_length]
result[i, :len(tokens)] = torch.tensor(tokens)
return result
def forward(self, text):
text = self.tokenize(text)
text_features = self.clip.encode_text(text)
return text_features
def get_class_weight(original_caption, prompt_prefix='a '):
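    # Resolve the list of class names (from a dataset keyword, a ' . '-separated caption, or an explicit list)
    # and embed "<prompt_prefix><name>" prompts with CLIPTextEncoder; returns (class_names, D x C embedding matrix).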
if isinstance(original_caption, str):
if original_caption == 'coco':
class_names = CocoDataset.METAINFO['classes']
elif original_caption == 'cityscapes':
class_names = CityscapesDataset.METAINFO['classes']
elif original_caption == 'voc':
class_names = VOCDataset.METAINFO['classes']
elif original_caption == 'openimages':
class_names = OpenImagesDataset.METAINFO['classes']
elif original_caption == 'lvis':
class_names = LVISV1Dataset.METAINFO['classes']
else:
if not original_caption.endswith('.'):
original_caption = original_caption + ' . '
original_caption = original_caption.split(' . ')
class_names = list(filter(lambda x: len(x) > 0, original_caption))
    # for test.py: original_caption is already a list of class names
else:
class_names = list(original_caption)
text_encoder = CLIPTextEncoder()
text_encoder.eval()
texts = [prompt_prefix + x for x in class_names]
print_log(f'Computing text embeddings for {len(class_names)} classes.')
embeddings = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
return class_names, embeddings
def reset_cls_layer_weight(roi_head, weight):
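    # Install zero-shot classifier weights: load them from a .npy path (or take a tensor directly),
    # append a background column, L2-normalize each column, and assign the result to fc_cls.zs_weight
    # of every cascade bbox head.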
if type(weight) == str:
print_log(f'Resetting cls_layer_weight from file: {weight}')
zs_weight = torch.tensor(
np.load(weight),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
else:
zs_weight = weight
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight.shape[0], 1))], dim=1) # D x (C + 1)
zs_weight = F.normalize(zs_weight, p=2, dim=0)
zs_weight = zs_weight.to('cuda')
num_classes = zs_weight.shape[-1]
for bbox_head in roi_head.bbox_head:
bbox_head.num_classes = num_classes
del bbox_head.fc_cls.zs_weight
bbox_head.fc_cls.zs_weight = zs_weight
@MODELS.register_module() | class Detic(CascadeRCNN): | 1 | 2023-12-11 15:23:03+00:00 | 12k |
merlresearch/PixPNet | pixpnet/protonets/prp/prp.py | [
{
"identifier": "AdaptiveAvgPool2DWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class AdaptiveAvgPool2DWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, eps):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n\n def configvalues_totensorlist(module, device):\n\n propertynames = [\"output_size\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, tuple):\n v = torch.tensor(v, dtype=torch.int32, device=device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module, x.device)\n epstensor = torch.tensor([eps], dtype=torch.float32, device=x.device)\n ctx.save_for_backward(x, epstensor, *values) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, epstensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"output_size\"]\n # idea: paramsdict={ n: values[i]\n # for i,n in enumerate(propertynames) }\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n eps = epstensor.item()\n\n # class instantiation\n layerclass = torch.nn.AdaptiveAvgPool2d(**paramsdict)\n\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=layerclass, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return R, None, None"
},
{
"identifier": "Conv2DBeta0WrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class Conv2DBeta0WrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, lrpignorebias):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n\n def configvalues_totensorlist(module):\n propertynames = [\"in_channels\", \"out_channels\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=module.weight.device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=module.weight.device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module)\n\n if module.bias is None:\n bias = None\n else:\n bias = module.bias.data.clone()\n lrpignorebiastensor = torch.tensor([lrpignorebias], dtype=torch.bool, device=module.weight.device)\n ctx.save_for_backward(\n x, module.weight.data.clone(), bias, lrpignorebiastensor, *values\n ) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, conv2dweight, conv2dbias, lrpignorebiastensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"in_channels\", \"out_channels\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\"]\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n\n if conv2dbias is None:\n module = nn.Conv2d(**paramsdict, bias=False)\n else:\n module = nn.Conv2d(**paramsdict, bias=True)\n module.bias = torch.nn.Parameter(conv2dbias)\n\n module.weight = torch.nn.Parameter(conv2dweight)\n\n pnconv = PosNegConv(module, ignorebias=lrpignorebiastensor.item())\n\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=pnconv, relevance_output=grad_output[0], eps0=1e-12, eps=0)\n\n return R, None, None"
},
{
"identifier": "CosineDistLRPClass",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class CosineDistLRPClass(torch.autograd.Function):\n @staticmethod\n def forward(ctx, conv_features, model):\n ctx.save_for_backward(conv_features, model.prototype_vectors)\n if VERBOSE:\n print(\"cosine custom forward\")\n\n # An alternative distance metric used in TesNet. Alternative to\n # l2_convolution\n x = F.normalize(conv_features, p=2, dim=1)\n prototype_vectors = F.normalize(model.prototype_vectors, p=2, dim=1)\n similarities = F.conv2d(input=x, weight=prototype_vectors)\n # clip similarities in the range [-1, +1] (numerical error can\n # cause similarities to be outside this range)\n similarities = torch.clamp(similarities, -1, 1)\n distances = 1 - similarities # bounded [0, 2]\n\n similarities = torch.log((distances + 1) / (distances + model.epsilon))\n\n return similarities\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the\n loss with respect to the output, and we need to compute the gradient of\n the loss with respect to the input.\n \"\"\"\n if VERBOSE:\n print(\"cosine custom backward\")\n conv, prototypes = ctx.saved_tensors\n i = conv.shape[2]\n j = conv.shape[3]\n c = conv.shape[1]\n p = prototypes.shape[0]\n\n # Broadcast conv to Nxsize(conv) (No. of prototypes)\n conv = conv.repeat(p, 1, 1, 1) # NP x D x Hz x Wz\n prototype = prototypes.repeat(1, 1, i, j) # P x D x Hz x Wz\n\n conv = conv.squeeze() # think this does nothing\n\n cosine_dists = 1 - F.normalize(prototype, p=2, dim=1) * F.normalize(conv, p=2, dim=1)\n d = 1 / (cosine_dists**2 + 1e-12)\n\n denom = torch.sum(d, dim=1, keepdim=True) + 1e-12\n denom = denom.repeat(1, c, 1, 1) + 1e-12\n R = torch.div(d, denom)\n\n grad_output = grad_output.repeat(c, 1, 1, 1)\n grad_output = grad_output.permute(1, 0, 2, 3)\n\n R = R * grad_output\n\n R = torch.sum(R, dim=0)\n\n R = torch.unsqueeze(R, dim=0)\n\n return R, None, None"
},
{
"identifier": "EltwiseSumStacked2EpsWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class EltwiseSumStacked2EpsWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, stackedx, module, eps):\n epstensor = torch.tensor([eps], dtype=torch.float32, device=stackedx.device)\n ctx.save_for_backward(stackedx, epstensor)\n return module.forward(stackedx)\n\n @staticmethod\n def backward(ctx, grad_output):\n stackedx, epstensor = ctx.saved_tensors\n\n X = stackedx.clone().detach().requires_grad_(True)\n\n eps = epstensor.item()\n\n s2 = SumStacked2().to(X.device)\n Rtmp = lrp_backward(_input=X, layer=s2, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return Rtmp, None, None"
},
{
"identifier": "L2LRPClass",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class L2LRPClass(torch.autograd.Function):\n @staticmethod\n def forward(ctx, conv_features, model):\n # *values unpacks the list\n ctx.save_for_backward(conv_features, model.prototype_vectors)\n if VERBOSE:\n print(\"l2 custom forward\")\n x2 = conv_features**2\n x2_patch_sum = F.conv2d(input=x2, weight=model.ones)\n\n p2 = model.prototype_vectors**2\n p2 = torch.sum(p2, dim=(1, 2, 3))\n # p2 is a vector of shape (num_prototypes,)\n # then we reshape it to (num_prototypes, 1, 1)\n p2_reshape = p2.view(-1, 1, 1)\n\n xp = F.conv2d(input=conv_features, weight=model.prototype_vectors)\n intermediate_result = -2 * xp + p2_reshape # use broadcast\n # x2_patch_sum and intermediate_result are of the same shape\n distances = F.relu(x2_patch_sum + intermediate_result)\n\n similarities = torch.log((distances + 1) / (distances + model.epsilon))\n\n return similarities\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the\n loss with respect to the output, and we need to compute the gradient of\n the loss with respect to the input.\n \"\"\"\n if VERBOSE:\n print(\"l2 custom backward\")\n conv, prototypes = ctx.saved_tensors\n i = conv.shape[2]\n j = conv.shape[3]\n c = conv.shape[1]\n p = prototypes.shape[0]\n\n # Broadcast conv to Nxsize(conv) (No. of prototypes)\n conv = conv.repeat(p, 1, 1, 1)\n prototype = prototypes.repeat(1, 1, i, j)\n\n conv = conv.squeeze()\n\n l2 = (conv - prototype) ** 2\n d = 1 / (l2**2 + 1e-12)\n\n denom = torch.sum(d, dim=1, keepdim=True) + 1e-12\n denom = denom.repeat(1, c, 1, 1) + 1e-12\n R = torch.div(d, denom)\n\n grad_output = grad_output.repeat(c, 1, 1, 1)\n grad_output = grad_output.permute(1, 0, 2, 3)\n\n R = R * grad_output\n\n R = torch.sum(R, dim=0)\n\n R = torch.unsqueeze(R, dim=0)\n\n return R, None, None"
},
{
"identifier": "LinearLayerEpsWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class LinearLayerEpsWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module, eps):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward\n method.\n \"\"\"\n\n def configvalues_totensorlist(module):\n\n propertynames = [\"in_features\", \"out_features\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=module.weight.device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple\n # of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=module.weight.device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module)\n epstensor = torch.tensor([eps], dtype=torch.float32, device=x.device)\n\n if module.bias is None:\n bias = None\n else:\n bias = module.bias.data.clone()\n ctx.save_for_backward(x, module.weight.data.clone(), bias, epstensor, *values) # *values unpacks the list\n\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, weight, bias, epstensor, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"in_features\", \"out_features\"]\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n #######################################################################\n paramsdict = tensorlist_todict(values)\n\n if bias is None:\n module = nn.Linear(**paramsdict, bias=False)\n else:\n module = nn.Linear(**paramsdict, bias=True)\n module.bias = torch.nn.Parameter(bias)\n\n module.weight = torch.nn.Parameter(weight)\n\n eps = epstensor.item()\n X = input_.clone().detach().requires_grad_(True)\n R = lrp_backward(_input=X, layer=module, relevance_output=grad_output[0], eps0=eps, eps=eps)\n\n return R, None, None"
},
{
"identifier": "MaxPool2DWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class MaxPool2DWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward\n method.\n \"\"\"\n\n def configvalues_totensorlist(module, device):\n\n propertynames = [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"]\n values = []\n for attr in propertynames:\n v = getattr(module, attr)\n # convert it into tensor\n # has no treatment for booleans yet\n if isinstance(v, bool):\n v = torch.tensor([v], dtype=torch.bool, device=device)\n elif isinstance(v, int):\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, bool):\n\n v = torch.tensor([v], dtype=torch.int32, device=device)\n elif isinstance(v, tuple):\n ################\n ################\n # FAILMODE: if it is not a tuple of ints but e.g. a tuple\n # of floats, or a tuple of a tuple\n\n v = torch.tensor(v, dtype=torch.int32, device=device)\n else:\n print(\"v is neither int nor tuple. unexpected\")\n exit()\n values.append(v)\n return propertynames, values\n\n # stash module config params and trainable params\n propertynames, values = configvalues_totensorlist(module, x.device)\n ctx.save_for_backward(x, *values) # *values unpacks the list\n\n if VERBOSE:\n print(\"maxpool2d custom forward\")\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n\n input_, *values = ctx.saved_tensors\n\n #######################################################################\n # reconstruct dictionary of config parameters\n def tensorlist_todict(values):\n propertynames = [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"]\n # idea: paramsdict={ n: values[i]\n # for i,n in enumerate(propertynames) }\n # but needs to turn tensors to ints or tuples!\n paramsdict = {}\n for i, n in enumerate(propertynames):\n v = values[i]\n if v.numel == 1:\n paramsdict[n] = v.item() # to cpu?\n else:\n alist = v.tolist()\n if len(alist) == 1:\n paramsdict[n] = alist[0]\n else:\n paramsdict[n] = tuple(alist)\n return paramsdict\n\n paramsdict = tensorlist_todict(values)\n\n layerclass = torch.nn.MaxPool2d(**paramsdict)\n\n X = input_.clone().detach().requires_grad_(True)\n with torch.enable_grad():\n Z = layerclass.forward(X)\n relevance_output_data = grad_output[0].clone().detach().unsqueeze(0)\n Z.backward(relevance_output_data)\n R = X.grad\n\n return R, None"
},
{
"identifier": "ReluWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class ReluWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n # stash module config params and trainable params\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, None"
},
{
"identifier": "SigmoidWrapperFct",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class SigmoidWrapperFct(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n\n @staticmethod\n def forward(ctx, x, module):\n return module.forward(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, None"
},
{
"identifier": "SumStacked2",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "class SumStacked2(nn.Module):\n def __init__(self):\n super(SumStacked2, self).__init__()\n\n @staticmethod\n def forward(x): # from X=torch.stack([X0, X1], dim=0)\n assert x.shape[0] == 2\n return torch.sum(x, dim=0)"
},
{
"identifier": "bnafterconv_overwrite_intoconv",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "def bnafterconv_overwrite_intoconv(conv, bn): # after visatt\n\n if VERBOSE:\n print(conv, bn)\n\n assert isinstance(bn, nn.BatchNorm2d)\n assert isinstance(conv, nn.Conv2d)\n\n s = (bn.running_var + bn.eps) ** 0.5\n w = bn.weight\n b = bn.bias\n m = bn.running_mean\n conv.weight = torch.nn.Parameter(conv.weight * (w / s).reshape(-1, 1, 1, 1))\n\n if conv.bias is None:\n conv.bias = torch.nn.Parameter((0 - m) * (w / s) + b)\n else:\n conv.bias = torch.nn.Parameter((conv.bias - m) * (w / s) + b)\n return conv"
},
{
"identifier": "get_lrpwrapperformodule",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "def get_lrpwrapperformodule(module, lrp_params, lrp_layer2method, thisis_inputconv_andiwant_zbeta=False):\n if isinstance(module, nn.ReLU):\n key = \"nn.ReLU\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.Sigmoid):\n key = \"nn.Sigmoid\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.BatchNorm2d):\n\n key = \"nn.BatchNorm2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, nn.Linear):\n\n key = \"nn.Linear\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default linearlayer_eps_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"linear_eps\"])\n\n elif isinstance(module, nn.Conv2d):\n if thisis_inputconv_andiwant_zbeta:\n return Conv2DZBetaWrapperClass(module, lrp_params[\"conv2d_ignorebias\"])\n else:\n key = \"nn.Conv2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\n \"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key\n )\n\n # default conv2d_beta0_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(\n module, autogradfunction=autogradfunction, parameter1=lrp_params[\"conv2d_ignorebias\"]\n )\n\n elif isinstance(module, nn.AdaptiveAvgPool2d):\n\n key = \"nn.AdaptiveAvgPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default adaptiveavgpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"pooling_eps\"])\n\n elif isinstance(module, nn.AvgPool2d):\n\n key = \"nn.AvgPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default adaptiveavgpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n 
return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"pooling_eps\"])\n\n elif isinstance(module, nn.MaxPool2d):\n\n key = \"nn.MaxPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default maxpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, SumStacked2): # resnet specific\n\n key = \"sum_stacked2\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default eltwisesum_stacked2_eps_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"eltwise_eps\"])\n\n elif isinstance(module, ClampLayer): # densenet specific\n\n key = \"clamplayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n\n elif isinstance(module, TensorBiasedLinearLayer): # densenet specific\n\n key = \"tensorbiased_linearlayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(module, autogradfunction=autogradfunction, parameter1=lrp_params[\"linear_eps\"])\n\n elif isinstance(module, TensorBiasedConvLayer): # densenet specific\n\n key = \"tensorbiased_convlayer\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default relu_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return OneParamWrapperClass(\n module, autogradfunction=autogradfunction, parameter1=lrp_params[\"conv2d_ignorebias\"]\n )\n\n else:\n key = \"nn.MaxPool2d\"\n if key not in lrp_layer2method:\n print(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n raise LRLookupNotFoundError(\"found no dictionary entry in \" \"lrp_layer2method for this module name:\", key)\n\n # default maxpool2d_wrapper_fct()\n autogradfunction = lrp_layer2method[key]()\n return ZeroparamWrapperClass(module, autogradfunction=autogradfunction)\n print(\"found no lookup for this module:\", module)\n raise LRLookupNotFoundError(\"found no lookup for this module:\", module)"
},
{
"identifier": "resetbn",
"path": "pixpnet/protonets/prp/lrp_general6.py",
"snippet": "def resetbn(bn):\n assert isinstance(bn, nn.BatchNorm2d)\n\n bnc = copy.deepcopy(bn)\n bnc.reset_parameters()\n\n return bnc"
},
{
"identifier": "BasicBlock",
"path": "pixpnet/protonets/prp/resnet_features.py",
"snippet": "class BasicBlock(nn.Module):\n # class attribute\n expansion = 1\n num_layers = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n # only conv with possibly not 1 stride\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n\n # if stride is not 1 then self.downsample cannot be None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n # the residual connection\n out += identity\n out = self.relu(out)\n\n return out\n\n def block_conv_info(self):\n block_kernel_sizes = [3, 3]\n block_strides = [self.stride, 1]\n block_paddings = [1, 1]\n\n return block_kernel_sizes, block_strides, block_paddings"
},
{
"identifier": "Bottleneck",
"path": "pixpnet/protonets/prp/resnet_features.py",
"snippet": "class Bottleneck(nn.Module):\n # class attribute\n expansion = 4\n num_layers = 3\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n # only conv with possibly not 1 stride\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n\n # if stride is not 1 then self.downsample cannot be None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n def block_conv_info(self):\n block_kernel_sizes = [1, 3, 1]\n block_strides = [1, self.stride, 1]\n block_paddings = [0, 1, 0]\n\n return block_kernel_sizes, block_strides, block_paddings"
},
{
"identifier": "ResNetFeatures",
"path": "pixpnet/protonets/prp/resnet_features.py",
"snippet": "class ResNetFeatures(nn.Module):\n \"\"\"\n the convolutional layers of ResNet\n the average pooling and final fully convolutional layer is removed\n \"\"\"\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):\n super(ResNetFeatures, self).__init__()\n\n self.inplanes = 64\n\n # the first convolutional layer before the structured sequence of blocks\n # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n # bias=False)\n self.conv1_no_act = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.conv1 = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n # comes from the first conv and the following max pool\n self.kernel_sizes = [7, 3]\n self.strides = [2, 2]\n self.paddings = [3, 1]\n\n # the following layers, each layer is a sequence of blocks\n self.block = block\n self.layers = layers\n self.layer1 = self._make_layer(block=block, planes=64, num_blocks=self.layers[0])\n self.layer2 = self._make_layer(block=block, planes=128, num_blocks=self.layers[1], stride=2)\n self.layer3 = self._make_layer(block=block, planes=256, num_blocks=self.layers[2], stride=2)\n self.layer4 = self._make_layer(block=block, planes=512, num_blocks=self.layers[3], stride=2)\n\n # initialize the parameters\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual\n # block behaves like an identity.\n # This improves the model by 0.2~0.3% according to\n # https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, downsample)]\n # only the first block has downsample that is possibly not None\n\n self.inplanes = planes * block.expansion\n for _ in range(1, num_blocks):\n layers.append(block(self.inplanes, planes))\n\n # keep track of every block's conv size, stride size, and padding size\n for each_block in layers:\n block_kernel_sizes, block_strides, block_paddings = each_block.block_conv_info()\n self.kernel_sizes.extend(block_kernel_sizes)\n self.strides.extend(block_strides)\n self.paddings.extend(block_paddings)\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1_no_act(x)\n x = self.bn1(x)\n x = self.conv1(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n def conv_info(self):\n return self.kernel_sizes, self.strides, self.paddings\n\n def num_layers(self):\n \"\"\"\n the number of conv layers in the network, not counting the number\n of bypass layers\n \"\"\"\n return (\n self.block.num_layers * self.layers[0]\n + self.block.num_layers * self.layers[1]\n + self.block.num_layers * self.layers[2]\n + self.block.num_layers * self.layers[3]\n + 1\n )\n\n def __repr__(self):\n 
template = \"resnet{}_features\"\n return template.format(self.num_layers() + 1)"
}
] | import copy
import torch
from collections import OrderedDict
from torch import nn
from torchvision import datasets
from pixpnet.protonets.prp.lrp_general6 import (
AdaptiveAvgPool2DWrapperFct,
Conv2DBeta0WrapperFct,
CosineDistLRPClass,
EltwiseSumStacked2EpsWrapperFct,
L2LRPClass,
LinearLayerEpsWrapperFct,
MaxPool2DWrapperFct,
ReluWrapperFct,
SigmoidWrapperFct,
SumStacked2,
bnafterconv_overwrite_intoconv,
get_lrpwrapperformodule,
resetbn,
)
from pixpnet.protonets.prp.resnet_features import BasicBlock, Bottleneck, ResNetFeatures | 10,000 | """
Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
Copyright (c) 2022 Srishti Gautam, Marina Hohne, Robert Jenssen, Michael Kampffmeyer
SPDX-License-Identifier: AGPL-3.0-or-later
SPDX-License-Identifier: MIT
"""
def imshow_im(hm, q=100):
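    # Collapse a relevance heatmap to 2D by summing over the channel dimension (the q argument is unused).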
hm = hm.squeeze().sum(dim=0).detach()
return hm
# partially replace BatchNorm with our own classes; no pretrained weights are loaded here
class TorchModuleNotFoundError(Exception):
pass
class BasicBlockFused(BasicBlock):
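    # ResNet BasicBlock variant adapted for layer-wise relevance propagation (PRP)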
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockFused, self).__init__(inplanes, planes, stride, downsample)
# own
| """
Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
Copyright (c) 2022 Srishti Gautam, Marina Hohne, Robert Jenssen, Michael Kampffmeyer
SPDX-License-Identifier: AGPL-3.0-or-later
SPDX-License-Identifier: MIT
"""
def imshow_im(hm, q=100):
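    # Collapse a relevance heatmap to 2D by summing over the channel dimension (the q argument is unused).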
hm = hm.squeeze().sum(dim=0).detach()
return hm
# partially replace BatchNorm with our own classes; no pretrained weights are loaded here
class TorchModuleNotFoundError(Exception):
pass
class BasicBlockFused(BasicBlock):
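    # ResNet BasicBlock variant adapted for layer-wise relevance propagation (PRP)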
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockFused, self).__init__(inplanes, planes, stride, downsample)
# own | self.elt = SumStacked2() # eltwisesum2() | 9 | 2023-12-06 23:49:31+00:00 | 12k |
dvmazur/mixtral-offloading | src/build_model.py | [
{
"identifier": "ExpertCache",
"path": "src/expert_cache.py",
"snippet": "class ExpertCache:\n def __init__(self, make_module: callable, main_size: int, offload_size: int, buffer_size: int):\n \"\"\"Dynamically loads an array of modules with identical hyperparameters\"\"\"\n self.module_type = self.module_size = self.device = None\n self.active = False\n\n self.registered_experts: Dict[ExpertUID, ExpertInfo] = dict()\n\n self.main_modules = [self._check_module(make_module()) for i in range(main_size)]\n self.main_infos: List[Optional[ExpertInfo]] = [None for _ in range(main_size)]\n\n assert self.module_size is not None\n self.offloaded_storages = [\n torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(offload_size)]\n self.offloaded_infos: List[Optional[ExpertInfo]] = [None for _ in range(offload_size)]\n\n # temporary storage to shave off latency\n self.device_expert_buffers = deque([self._check_module(make_module()) for _ in range(buffer_size)])\n self.offloaded_storage_buffers = deque([\n torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(buffer_size)])\n self.group_infos: Dict[int, EvictionGroupInfo] = defaultdict(EvictionGroupInfo)\n\n def _check_module(self, module: MixtralExpertWrapper):\n assert isinstance(module.storage, torch.UntypedStorage)\n if self.module_type is None:\n self.module_type = type(module)\n self.module_size = len(module.storage)\n self.device = module.storage.device\n else:\n assert isinstance(module, self.module_type)\n assert len(module.storage) == self.module_size\n assert module.storage.device == self.device\n return module\n\n def add_expert(self, uid: ExpertUID, module: MixtralExpertWrapper, eviction_group: int = 0,\n offload: Optional[bool] = None):\n \"\"\"Register an expert to the cache and associate it with uid\"\"\"\n assert self.module_type is not None\n assert isinstance(module, self.module_type)\n return self.add_expert_storage(uid, module.storage, eviction_group=eviction_group, offload=offload)\n\n def add_expert_storage(self, uid: ExpertUID, storage: torch.UntypedStorage,\n eviction_group: int = 0, offload: Optional[bool] = None):\n assert uid not in self.registered_experts, f\"expert {uid} already registered\"\n assert isinstance(storage, torch.UntypedStorage)\n assert len(storage) == self.module_size\n\n if offload is None or not offload: # False or None\n for i in range(len(self.main_modules)):\n if self.main_infos[i] is None:\n self.main_modules[i].storage.copy_(storage)\n info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=False, index=i)\n self.registered_experts[uid] = self.main_infos[i] = info\n self.group_infos[eviction_group].add(info)\n return # done allocating; found spot on device\n if offload is None or offload: # True or None\n for i in range(len(self.offloaded_storages)):\n if self.offloaded_infos[i] is None:\n self.offloaded_storages[i].copy_(storage)\n info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=True, index=i)\n self.registered_experts[uid] = self.offloaded_infos[i] = info\n self.group_infos[eviction_group].add(info)\n return # done allocating; found an offloaded spot\n raise ValueError(\"Cache is full\")\n\n def load_experts(\n self, *uids: ExpertUID, unordered: bool = False) -> Iterator[Tuple[ExpertUID, MixtralExpertWrapper]]:\n \"\"\"\n :example:\n >>> for uid, expert in expert_cache.load_experts(*list_of_uids, unordered=True):\n >>> for uid, expert in expert_iter:\n >>> result += expert(x) * get_moe_weight(uid)\n\n :param uids: iterate over the specified expert uids. 
Same uids as in add_expert\n :param unordered: if True, allows cache to iterate experts in arbitrary order\n The order is chosen to minimize the total wait time.\n :returns: an iterator that yields (uid, expert) pairs, only usable inside the for loop\n\n \"\"\"\n assert len(set(uids)) == len(uids)\n assert not self.active, \"already loading experts; buffers are busy\"\n if unordered: # yield non-offloaded experts first\n uids = sorted(uids, key=lambda uid: self.registered_experts[uid].offloaded)\n infos = [self.registered_experts[uid] for uid in uids]\n\n assert len(set(info.eviction_group for info in infos)) == 1, \"experts must be in the same evicton group\"\n eviction_group = self.group_infos[infos[0].eviction_group]\n for info in infos:\n eviction_group.mark_used(info)\n\n try:\n self.active = True\n # save pre-loaded experts before they can be swapped\n pre_loaded_infos = deque([info for info in infos if not info.offloaded])\n pre_loaded_experts = deque([self.main_modules[info.index] for info in pre_loaded_infos])\n\n # begin loading experts into free buffers in background (via non-blocking copy)\n infos_to_load = deque([info for info in infos if info.offloaded])\n infos_in_loading = deque([])\n experts_in_loading = deque([])\n window_size = min(len(self.device_expert_buffers) - 1,\n len(eviction_group.main_infos),\n len(infos_to_load))\n for _ in range(window_size):\n info_to_load = infos_to_load.popleft()\n infos_in_loading.append(info_to_load)\n experts_in_loading.append(\n self._swap(info_to_load, eviction_group.choose_expert_to_evict()))\n\n for info in infos:\n if len(pre_loaded_infos) > 0 and info is pre_loaded_infos[0]:\n pre_loaded_infos.popleft()\n yield (info.uid, pre_loaded_experts.popleft())\n elif len(infos_in_loading) > 0 and info is infos_in_loading[0]:\n infos_in_loading.popleft()\n yield (info.uid, experts_in_loading.popleft())\n if len(infos_to_load) > 0:\n info_to_load = infos_to_load.popleft()\n infos_in_loading.append(info_to_load)\n experts_in_loading.append(\n self._swap(info_to_load, eviction_group.choose_expert_to_evict()))\n else:\n raise RuntimeError(\"internal error: caching algorithm failed\")\n finally:\n self.active = False\n\n def _swap(self, info_to_load: ExpertInfo, info_to_evict: ExpertInfo) -> nn.Module:\n \"\"\"Swap an offloaded expert (info_to_load) with an on-device expert (info_to_evict) return the loaded expert\"\"\"\n assert info_to_load.offloaded and not info_to_evict.offloaded\n assert info_to_load.eviction_group == info_to_evict.eviction_group\n # swap a single on-device expert with a single offloaded expert using buffers for parallelism\n offloaded_storage_buffer = self.offloaded_storage_buffers.popleft()\n device_expert_buffer = self.device_expert_buffers.popleft()\n device_expert_buffer.storage.copy_(self.offloaded_storages[info_to_load.index], non_blocking=True)\n offloaded_storage_buffer.copy_(self.main_modules[info_to_evict.index].storage, non_blocking=True)\n\n self.device_expert_buffers.append(self.main_modules[info_to_evict.index])\n self.main_modules[info_to_evict.index] = device_expert_buffer\n self.offloaded_storage_buffers.append(self.offloaded_storages[info_to_load.index])\n self.offloaded_storages[info_to_load.index] = offloaded_storage_buffer\n\n self.main_infos[info_to_evict.index] = info_to_load\n self.offloaded_infos[info_to_load.index] = info_to_evict\n info_to_evict.offloaded, info_to_load.offloaded = info_to_load.offloaded, info_to_evict.offloaded\n info_to_evict.index, info_to_load.index = info_to_load.index, 
info_to_evict.index\n self.group_infos[info_to_load.eviction_group].swap(info_to_load, info_to_evict)\n return device_expert_buffer"
},
{
"identifier": "MixtralExpertWrapper",
"path": "src/expert_wrapper.py",
"snippet": "class MixtralExpertWrapper(nn.Module):\n def __init__(\n self,\n expert_module: tp.Any,\n device: torch.device,\n ):\n super().__init__()\n \n expert_module, self.storage = self.replace_layer_storage(expert_module, device)\n self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)\n \n self._register_state_dict_hook(self._add_storage_to_state_dict_hook)\n self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)\n \n @staticmethod\n def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):\n state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)\n return state_dict\n \n def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())\n del state_dict[prefix + 'storage']\n \n def forward(self, *args, **kwargs):\n return self.expert_module(*args, **kwargs)\n \n \n @staticmethod\n def replace_layer_storage(\n layer: tp.Any,\n device: torch.device,\n ):\n state_dict = {\n f\"w{i}\": {\n \"W_q\": getattr(layer, f\"w{i}\").W_q,\n \"meta\": getattr(layer, f\"w{i}\").meta,\n \"bias\": getattr(layer, f\"w{i}\").bias,\n }\n for i in range(1, 4)\n }\n\n storage_size = 0\n offsets = [0]\n\n for x in nested_flatten(state_dict):\n if not isinstance(x, torch.Tensor):\n continue\n storage_size += x.nbytes\n offsets.append(storage_size)\n\n storage = torch.UntypedStorage(storage_size, device=device) \n\n i = 0\n new_flattened_states = list()\n for x in nested_flatten(state_dict):\n if not isinstance(x, torch.Tensor):\n new_flattened_states.append(x)\n continue\n\n start = offsets[i]\n end = offsets[i + 1]\n a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)\n a_view[...] = x\n assert a_view.data_ptr() == storage.data_ptr() + start\n i += 1\n new_flattened_states.append(a_view)\n\n state_dict = nested_pack(new_flattened_states, state_dict)\n\n for layer_id, states in state_dict.items():\n patched = getattr(layer, layer_id)\n patched.W_q = states[\"W_q\"]\n patched.meta = states[\"meta\"]\n patched.bias = states[\"bias\"]\n setattr(layer, layer_id, patched)\n\n return layer, storage"
},
{
"identifier": "HQQLinearTritonSavable",
"path": "src/custom_layers.py",
"snippet": "class HQQLinearTritonSavable(HQQLinear):\n def __init__(self, layer, quant_config, meta=None, **kwargs):\n \"\"\"\n Example how to get meta:\n >>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)\n >>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)\n \"\"\"\n \n assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4]\n \n super().__init__(layer, quant_config, **kwargs)\n \n if not hasattr(self, 'meta'):\n assert meta is not None\n self.meta = copy.deepcopy(meta)\n \n self._register_state_dict_hook(self._add_to_state_dict_hook)\n self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook)\n \n def quantize(self, *args, **kwargs):\n super().quantize(*args, **kwargs)\n \n # repacking\n self.repack()\n \n def repack(self):\n if self.W_q.shape != self.meta['shape']:\n W_q = Quantizer.unpack[self.meta['packing']](self.W_q)\n sh = self.meta['shape']\n W_q = W_q.reshape((-1,) + sh[1:])\n W_q = W_q[:sh[0], ...]\n self.W_q = Quantizer.pack[self.meta['packing']](W_q)\n \n def forward(self, x):\n return self.forward_triton(x)\n \n def set_backend(self, backend):\n pass\n \n @torch.inference_mode()\n def forward_triton(self, x):\n assert self.ready, \"model was not quantized\"\n assert self.meta['axis'] == 0\n\n W_q, meta = self.W_q, self.meta\n\n del_keys = []\n if 'quant_scale' in meta and meta['quant_scale']:\n meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')\n if 'quant_zero' in meta and meta['quant_zero']:\n meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')\n\n K = meta['shape'][1]\n N = meta['shape'][0]\n \n if self.meta['nbits'] == 4:\n fn = triton_matmul4_transpose\n elif self.meta['nbits'] == 3:\n fn = functools.partial(triton_matmul3_transpose, N=N)\n elif self.meta['nbits'] == 2:\n fn = triton_matmul2_transpose\n else:\n raise RuntimeError(f\"nbits == {self.meta['nbits']} isn't yet supported\")\n \n output = fn(\n meta['group_size'], x,\n W_q.view(-1, K),\n meta['scale'].view(-1, K),\n meta['zero'].view(-1, K),\n bias=self.bias if hasattr(self, 'bias') else None,\n )\n\n #Cleanup\n for key in del_keys:\n del meta[key]\n\n return output\n\n # to support .forward_pytorch(...) 
- backward compatibility\n @torch.inference_mode()\n def dequantize(self):\n assert self.ready, \"model was not quantized\"\n W_q, meta = self.W_q, self.meta\n del_keys = []\n if(meta['quant_scale']):\n meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')\n if(meta['quant_zero']):\n meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')\n \n W_q_p = Quantizer.unpack[meta['packing']](W_q).half()\n W_q_p = W_q_p[:meta['shape'][0], ...]\n W_q_p = W_q_p.reshape((meta['group_size'], -1))\n \n if((meta['group_size'] is not None) and (meta['nbits']==3)):\n W_q_p = W_q_p[:meta['group_size']] if (meta['axis']==0) else W_q_p[:,:meta['group_size']]\n W_est = ((W_q_p - meta['zero'])*meta['scale']).reshape(meta['shape']) \n \n #Cleanup\n del W_q_p\n for key in del_keys: del meta[key]\n return W_est\n \n @classmethod\n def get_hqq_meta(cls, linear_shape, quant_config):\n layer = HQQLinear(nn.Linear(*linear_shape, bias=False), quant_config)\n meta = layer.meta\n\n def _remove_tensors_recursive(d):\n keys = list(d.keys())\n\n for k in keys:\n if isinstance(d[k], torch.Tensor):\n del d[k]\n elif isinstance(d[k], dict):\n _remove_tensors_recursive(d[k])\n\n _remove_tensors_recursive(meta)\n\n return meta\n \n @staticmethod\n def _add_to_state_dict_hook(self, state_dict, prefix, local_metadata):\n tensor_paths = self._get_tensor_paths(self.meta)\n assert set(tensor_paths).issubset(\n {'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',\n 'scale', 'zero'}\n )\n \n def _add(name, value):\n state_dict[prefix + name] = value\n \n _add('W_q', self.W_q)\n \n if self.bias is not None:\n _add('bias', self.bias)\n \n if 'meta_scale' in self.meta:\n _add('meta.scale_q', self.meta['scale_q'])\n _add('meta.meta_scale.scale', self.meta['meta_scale']['scale'])\n _add('meta.meta_scale.zero', self.meta['meta_scale']['zero'])\n else:\n _add('meta.scale', self.meta['scale'])\n \n if 'meta_zero' in self.meta:\n _add('meta.zero_q', self.meta['zero_q'])\n _add('meta.meta_zero.scale', self.meta['meta_zero']['scale'])\n _add('meta.meta_zero.zero', self.meta['meta_zero']['zero'])\n else:\n _add('meta.zero', self.meta['zero'])\n \n return state_dict\n \n def _load_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n tensor_paths = [k[len(prefix + 'meta.'):] for k in state_dict.keys() if k.startswith(prefix + 'meta.')]\n assert set(tensor_paths).issubset(\n {'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',\n 'scale', 'zero'}\n )\n \n def _del(name):\n del state_dict[prefix + name]\n def _set(name):\n setattr(self, name, state_dict[prefix + name])\n _del(name)\n def _get(name):\n v = state_dict[prefix + name]\n _del(name)\n return v\n \n _set('W_q')\n if 'bias' in state_dict:\n _set('bias')\n else:\n self.bias = None\n \n if not hasattr(self, 'meta'):\n self.meta = {}\n \n if (prefix + 'meta.meta_scale.scale') in state_dict:\n self.meta['scale_q'] = _get('meta.scale_q')\n self.meta['quant_scale'] = True\n if not 'meta_scale' in self.meta:\n self.meta['meta_scale'] = {}\n self.meta['meta_scale'] |= {\n 'scale': _get('meta.meta_scale.scale'),\n 'zero': _get('meta.meta_scale.zero')\n }\n else:\n self.meta['scale'] = _get('meta.scale')\n if (prefix + 'meta.meta_zero.scale') in state_dict:\n self.meta['zero_q'] = _get('meta.zero_q')\n self.meta['quant_zero'] = True\n if not 'meta_zero' in 
self.meta:\n self.meta['meta_zero'] = {}\n self.meta['meta_zero'] |= {\n 'scale': _get('meta.meta_zero.scale'),\n 'zero': _get('meta.meta_zero.zero')\n }\n else:\n self.meta['zero'] = _get('meta.zero')\n self.ready = True\n \n # self.cuda()\n # self.in_gpu = self.W_q.device.type == 'cuda'\n # assert self.in_gpu\n \n self.repack()\n \n @classmethod\n def _get_tensor_paths(cls, state: Dict[str, Any], prefix=''):\n paths = []\n \n for k, v in state.items():\n if isinstance(v, dict):\n paths += cls._get_tensor_paths(v, prefix=k + '.')\n elif isinstance(v, torch.Tensor):\n paths.append(prefix + k)\n \n return paths\n \n def state_dict(self, *args, **kwargs):\n return nn.Module.state_dict(self, *args, **kwargs)\n \n def load_state_dict(self, *args, **kwargs):\n nn.Module.load_state_dict(self, *args, **kwargs)"
},
{
"identifier": "MixtralBLockSparseTop2MLP_HQQ",
"path": "src/custom_layers.py",
"snippet": "class MixtralBLockSparseTop2MLP_HQQ(nn.Module):\n def __init__(self, config: MixtralConfig, quant_config: Dict[str, Any], meta1, meta2):\n super().__init__()\n \n self.w1 = HQQLinearTritonSavable(None, quant_config, meta1)\n self.w2 = HQQLinearTritonSavable(None, quant_config, meta2)\n self.w3 = HQQLinearTritonSavable(None, quant_config, meta1)\n\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)\n current_hidden_states = self.w2(current_hidden_states)\n return current_hidden_states"
},
{
"identifier": "SparseMoeWrapper",
"path": "src/custom_layers.py",
"snippet": "class SparseMoeWrapper(nn.Module):\n def __init__(self, config, layer_id, gate, expert_cache):\n super().__init__()\n\n self.hidden_dim = config.hidden_size\n self.ffn_dim = config.intermediate_size\n self.num_experts = config.num_local_experts\n self.top_k = config.num_experts_per_tok\n self.layer_id = layer_id\n\n self.gate = gate\n self.experts = expert_cache\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n batch_size, sequence_length, hidden_dim = hidden_states.shape\n hidden_states = hidden_states.view(-1, hidden_dim)\n # router_logits: (batch * sequence_length, n_experts)\n router_logits = self.gate(hidden_states)\n\n routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)\n routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)\n routing_weights /= routing_weights.sum(dim=-1, keepdim=True)\n # we cast back to the input dtype\n routing_weights = routing_weights.to(hidden_states.dtype)\n\n final_hidden_states = torch.zeros(\n (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device\n )\n\n # One hot encode the selected experts to create an expert mask\n # this will be used to easily index which expert is going to be sollicitated\n expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)\n\n active_experts = selected_experts.flatten().unique().tolist()\n\n # Loop over all available experts in the model and perform the computation on each expert\n for (_layer_index, expert_idx), expert_layer in self.experts.load_experts(\n *((self.layer_id, expert_idx) for expert_idx in active_experts), unordered=True):\n idx, top_x = torch.where(expert_mask[expert_idx])\n assert top_x.shape[0] > 0\n\n # in torch it is faster to index using lists than torch tensors\n top_x_list = top_x.tolist()\n idx_list = idx.tolist()\n\n # Index the correct hidden states and compute the expert hidden state for\n # the current expert. We need to make sure to multiply the output hidden\n # states by `routing_weights` on the corresponding tokens (top-1 and top-2)\n current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)\n current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]\n\n # However `index_add_` only support torch tensors for indexing so we'll use\n # the `top_x` tensor here.\n final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))\n final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)\n return final_hidden_states, router_logits"
},
{
"identifier": "with_default_dtype",
"path": "src/utils.py",
"snippet": "@contextmanager\ndef with_default_dtype(dtype):\n _dtype_original = torch.get_default_dtype()\n\n try:\n torch.set_default_dtype(dtype)\n yield\n finally:\n torch.set_default_dtype(_dtype_original)"
}
] | import os
import json
import typing as tp
import torch
from functools import cache
from dataclasses import dataclass
from torch import nn
from transformers import AutoConfig
from transformers.models.mixtral import MixtralForCausalLM, MixtralConfig
from safetensors.torch import load_file
from torch import nn
from tqdm.auto import trange
from hqq.core.quantize import BaseQuantizeConfig
from .expert_cache import ExpertCache
from .expert_wrapper import MixtralExpertWrapper
from .custom_layers import (
HQQLinearTritonSavable,
MixtralBLockSparseTop2MLP_HQQ,
SparseMoeWrapper,
)
from .utils import with_default_dtype | 7,276 |
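# --- Editorial example (not part of the source record) ----------------------
# The imports and context snippets above suggest how the pieces compose: a
# 2-bit HQQ config for the expert MLPs plus the two metas for the
# (hidden -> ffn) and (ffn -> hidden) projections, fed into an "empty"
# quantized expert. This sketch assumes the helpers defined later in this
# record (get_default_ffn_quant_config, MixtralBLockSparseTop2MLP_HQQ) are
# importable from the same module; it is an illustration, not the repo's code.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")

quant_config, meta1, meta2 = get_default_ffn_quant_config(
    ffn_dim=config.intermediate_size, hidden_dim=config.hidden_size
)

# The expert's W_q / meta tensors are later filled from the safetensors shards
# referenced by model.safetensors.index.json (see make_and_load_expert_wrapper).
expert = MixtralBLockSparseTop2MLP_HQQ(config, quant_config, meta1, meta2)
# -----------------------------------------------------------------------------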
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_dim = hidden_size // num_heads
num_key_value_heads = config.num_key_value_heads
shapes = [
(hidden_size, num_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(num_heads * head_dim, hidden_size),
]
shape_to_meta = {
shape: HQQLinearTritonSavable.get_hqq_meta(shape, attn_quant_config)
for shape in shapes
}
def patch_fct_hqq(shape, quant_config):
meta = shape_to_meta[shape]
layer = HQQLinearTritonSavable(None, quant_config, meta=meta)
return layer
for layer in model.model.layers:
layer.block_sparse_moe.gate = nn.Linear(
config.hidden_size,
config.num_local_experts,
dtype=torch.float16,
device=device,
bias=False,
)
layer.self_attn.q_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
layer.self_attn.k_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.v_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.o_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
@cache
def get_default_ffn_quant_config(ffn_dim: int = 14336, hidden_dim: int = 4096):
quant_config = BaseQuantizeConfig(
nbits=2,
group_size=16,
quant_zero=True,
quant_scale=True,
)
meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
return quant_config, meta1, meta2
def make_empty_expert(
model_config: MixtralConfig, quant_config: QuantConfig
) -> MixtralBLockSparseTop2MLP_HQQ:
meta1, meta2 = quant_config.get_ffn_metas(
model_config.hidden_size, model_config.intermediate_size
)
return MixtralBLockSparseTop2MLP_HQQ(
model_config,
quant_config.ffn_config,
meta1,
meta2,
)
def make_and_load_expert_wrapper(
config: MixtralConfig,
quant_config: QuantConfig,
states_dir: str,
expert_uid: tuple[int, int],
device: torch.device,
) -> MixtralExpertWrapper:
layer_idx, expert_idx = expert_uid
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.{layer_idx}.block_sparse_moe.experts.{expert_idx}"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
state_dict = load_file(os.path.join(states_dir, state_fpath), device=str(device))
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict, strict=True)
return MixtralExpertWrapper(expert, device)
def load_00_expert_state_dict(states_dir: str, device: torch.device):
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.0.block_sparse_moe.experts.0"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
return load_file(os.path.join(states_dir, state_fpath), device=str(device))
def build_model(
device: torch.device,
quant_config: QuantConfig,
offload_config: OffloadConfig,
state_path: str,
):
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
state_dict_00 = load_00_expert_state_dict(state_path, device)
def _make_module():
config = AutoConfig.from_pretrained(model_name)
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict_00)
return MixtralExpertWrapper(expert, device=device)
|
@dataclass(frozen=True)
class OffloadConfig:
main_size: int
offload_size: int
buffer_size: int
offload_per_layer: int
class QuantConfig:
def __init__(
self,
ffn_config: BaseQuantizeConfig,
attn_config: BaseQuantizeConfig,
):
self.ffn_config = ffn_config
self.attn_config = attn_config
@cache
def get_ffn_metas(self, hidden_dim: int, ffn_dim: int) -> tuple[tp.Any, tp.Any]:
return (
HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), self.ffn_config),
HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), self.ffn_config),
)
def replace_attn_layers(
model: MixtralForCausalLM,
config: MixtralConfig,
quant_config: QuantConfig,
device: torch.device,
) -> None:
attn_quant_config = quant_config.attn_config
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_dim = hidden_size // num_heads
num_key_value_heads = config.num_key_value_heads
shapes = [
(hidden_size, num_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(num_heads * head_dim, hidden_size),
]
shape_to_meta = {
shape: HQQLinearTritonSavable.get_hqq_meta(shape, attn_quant_config)
for shape in shapes
}
def patch_fct_hqq(shape, quant_config):
meta = shape_to_meta[shape]
layer = HQQLinearTritonSavable(None, quant_config, meta=meta)
return layer
for layer in model.model.layers:
layer.block_sparse_moe.gate = nn.Linear(
config.hidden_size,
config.num_local_experts,
dtype=torch.float16,
device=device,
bias=False,
)
layer.self_attn.q_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
layer.self_attn.k_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.v_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.o_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
@cache
def get_default_ffn_quant_config(ffn_dim: int = 14336, hidden_dim: int = 4096):
quant_config = BaseQuantizeConfig(
nbits=2,
group_size=16,
quant_zero=True,
quant_scale=True,
)
meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
return quant_config, meta1, meta2
def make_empty_expert(
model_config: MixtralConfig, quant_config: QuantConfig
) -> MixtralBLockSparseTop2MLP_HQQ:
meta1, meta2 = quant_config.get_ffn_metas(
model_config.hidden_size, model_config.intermediate_size
)
return MixtralBLockSparseTop2MLP_HQQ(
model_config,
quant_config.ffn_config,
meta1,
meta2,
)
def make_and_load_expert_wrapper(
config: MixtralConfig,
quant_config: QuantConfig,
states_dir: str,
expert_uid: tuple[int, int],
device: torch.device,
) -> MixtralExpertWrapper:
layer_idx, expert_idx = expert_uid
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.{layer_idx}.block_sparse_moe.experts.{expert_idx}"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
state_dict = load_file(os.path.join(states_dir, state_fpath), device=str(device))
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict, strict=True)
return MixtralExpertWrapper(expert, device)
def load_00_expert_state_dict(states_dir: str, device: torch.device):
index_path = os.path.join(states_dir, "model.safetensors.index.json")
with open(index_path) as f:
module_idx = f"model.layers.0.block_sparse_moe.experts.0"
state_fpath = json.load(f)["weight_map"][f"{module_idx}.w1.W_q"]
return load_file(os.path.join(states_dir, state_fpath), device=str(device))
def build_model(
device: torch.device,
quant_config: QuantConfig,
offload_config: OffloadConfig,
state_path: str,
):
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
state_dict_00 = load_00_expert_state_dict(state_path, device)
def _make_module():
config = AutoConfig.from_pretrained(model_name)
expert = make_empty_expert(config, quant_config)
expert.load_state_dict(state_dict_00)
return MixtralExpertWrapper(expert, device=device)
| with device, with_default_dtype(torch.float16): | 5 | 2023-12-15 03:32:35+00:00 | 12k |
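# --- Editorial note on the gold next line ------------------------------------
# The recorded continuation for this example is
#     with device, with_default_dtype(torch.float16):
# i.e. build_model() pairs a torch.device context manager (available in
# PyTorch >= 2.0) with the with_default_dtype helper from the context
# snippets, so whatever is constructed inside the block (presumably the full
# MixtralForCausalLM) is allocated on the target device in half precision.
# A minimal, self-contained illustration of that pattern, with a Linear layer
# standing in for the real model:
import torch
from contextlib import contextmanager

@contextmanager
def with_default_dtype(dtype):
    # Reproduced from the record's src/utils.py snippet: temporarily override
    # the global default dtype and restore it afterwards, even on error.
    _dtype_original = torch.get_default_dtype()
    try:
        torch.set_default_dtype(dtype)
        yield
    finally:
        torch.set_default_dtype(_dtype_original)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

with device, with_default_dtype(torch.float16):
    layer = torch.nn.Linear(8, 8)   # created on `device`, in float16

assert layer.weight.dtype == torch.float16
assert torch.get_default_dtype() == torch.float32   # default restored on exit
# -----------------------------------------------------------------------------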
open-mmlab/PIA | predict.py | [
{
"identifier": "I2VPipeline",
"path": "animatediff/pipelines/i2v_pipeline.py",
"snippet": "class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n # memory_format: torch.memory_format,\n feature_extractor: CLIPImageProcessor = None,\n image_encoder: CLIPVisionModelWithProjection = None,\n ):\n super().__init__()\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n is_unet_version_less_0_9_0 = hasattr(unet.config, \"_diffusers_version\") and version.parse(\n version.parse(unet.config._diffusers_version).base_version\n ) < version.parse(\"0.9.0.dev0\")\n is_unet_sample_size_less_64 = hasattr(unet.config, \"sample_size\") and unet.config.sample_size < 64\n if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:\n deprecation_message = (\n \"The configuration file of the unet has set the default `sample_size` to smaller than\"\n \" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the\"\n \" following: \\n- CompVis/stable-diffusion-v1-4 \\n- CompVis/stable-diffusion-v1-3 \\n-\"\n \" CompVis/stable-diffusion-v1-2 \\n- CompVis/stable-diffusion-v1-1 \\n- runwayml/stable-diffusion-v1-5\"\n \" \\n- runwayml/stable-diffusion-inpainting \\n you should change 'sample_size' to 64 in the\"\n \" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`\"\n \" in the config might lead to incorrect results in future versions. 
If you have downloaded this\"\n \" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for\"\n \" the `unet/config.json` file\"\n )\n deprecate(\"sample_size<64\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(unet.config)\n new_config[\"sample_size\"] = 64\n unet._internal_dict = FrozenDict(new_config)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n image_encoder=image_encoder,\n feature_extractor=feature_extractor,\n scheduler=scheduler,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n # self.memory_format = memory_format\n self.use_ip_adapter = False\n\n @classmethod\n def build_pipeline(cls,\n base_cfg,\n base_model: str,\n unet_path: str,\n dreambooth_path: Optional[str] = None,\n lora_path: Optional[str] = None,\n lora_alpha: float = 0,\n vae_path: Optional[str] = None,\n ip_adapter_path: Optional[str] = None,\n ip_adapter_scale: float = 0.0,\n only_load_vae_decoder: bool = False,\n only_load_vae_encoder: bool = False) -> 'I2VPipeline':\n \"\"\"Method to build pipeline in a faster way~\n Args:\n base_cfg: The config to build model\n base_mode: The model id to initialize StableDiffusion\n unet_path: Path for i2v unet\n\n dreambooth_path: path for dreambooth model\n lora_path: path for lora model\n lora_alpha: value for lora scale\n\n only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt\n and maitain encoder as original.\n\n \"\"\"\n # build unet\n unet = UNet3DConditionModel.from_pretrained_2d(\n base_model, subfolder=\"unet\",\n unet_additional_kwargs=OmegaConf.to_container(\n base_cfg.unet_additional_kwargs))\n\n old_weights = unet.conv_in.weight\n old_bias = unet.conv_in.bias\n new_conv1 = InflatedConv3d(\n 9, old_weights.shape[0],\n kernel_size=unet.conv_in.kernel_size,\n stride=unet.conv_in.stride,\n padding=unet.conv_in.padding,\n bias=True if old_bias is not None else False)\n param = torch.zeros((320,5,3,3),requires_grad=True)\n new_conv1.weight = torch.nn.Parameter(torch.cat((old_weights,param),dim=1))\n if old_bias is not None:\n new_conv1.bias = old_bias\n unet.conv_in = new_conv1\n unet.config[\"in_channels\"] = 9\n\n unet_ckpt = torch.load(unet_path, map_location='cpu')\n unet.load_state_dict(unet_ckpt, strict=False)\n # NOTE: only load temporal layers and condition module\n # for key, value in unet_ckpt.items():\n # if 'motion' in key or 'conv_in' in key:\n # unet.state_dict()[key].copy_(value)\n\n # load vae, tokenizer, text encoder\n vae = AutoencoderKL.from_pretrained(base_model, subfolder=\"vae\")\n tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder=\"tokenizer\")\n text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder=\"text_encoder\")\n noise_scheduler = DDIMScheduler(**OmegaConf.to_container(base_cfg.noise_scheduler_kwargs))\n\n if dreambooth_path:\n\n print(\" >>> Begin loading DreamBooth >>>\")\n base_model_state_dict = {}\n with safe_open(dreambooth_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n base_model_state_dict[key] = f.get_tensor(key)\n\n # load unet\n converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, unet.config)\n\n old_value = converted_unet_checkpoint['conv_in.weight']\n new_param = unet_ckpt['conv_in.weight'][:,4:,:,:].clone().cpu()\n new_value = torch.nn.Parameter(torch.cat((old_value, new_param), dim=1))\n converted_unet_checkpoint['conv_in.weight'] = new_value\n 
unet.load_state_dict(converted_unet_checkpoint, strict=False)\n\n # load vae\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(\n base_model_state_dict, vae.config,\n only_decoder=only_load_vae_decoder,\n only_encoder=only_load_vae_encoder,)\n need_strict = not (only_load_vae_decoder or only_load_vae_encoder)\n vae.load_state_dict(converted_vae_checkpoint, strict=need_strict)\n print('Prefix in loaded VAE checkpoint: ')\n print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()]))\n\n # load text encoder\n text_encoder_checkpoint = convert_ldm_clip_checkpoint(base_model_state_dict)\n if text_encoder_checkpoint:\n text_encoder.load_state_dict(text_encoder_checkpoint, strict=False)\n\n print(\" <<< Loaded DreamBooth <<<\")\n\n if vae_path:\n print(' >>> Begin loading VAE >>>')\n vae_state_dict = {}\n if vae_path.endswith('safetensors'):\n with safe_open(vae_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n vae_state_dict[key] = f.get_tensor(key)\n elif vae_path.endswith('ckpt') or vae_path.endswith('pt'):\n vae_state_dict = torch.load(vae_path, map_location='cpu')\n if 'state_dict' in vae_state_dict:\n vae_state_dict = vae_state_dict['state_dict']\n\n vae_state_dict = {f'first_stage_model.{k}': v for k, v in vae_state_dict.items()}\n\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(\n vae_state_dict, vae.config,\n only_decoder=only_load_vae_decoder,\n only_encoder=only_load_vae_encoder,)\n print('Prefix in loaded VAE checkpoint: ')\n print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()]))\n need_strict = not (only_load_vae_decoder or only_load_vae_encoder)\n vae.load_state_dict(converted_vae_checkpoint, strict=need_strict)\n print(\" <<< Loaded VAE <<<\")\n\n if lora_path:\n\n print(\" >>> Begin loading LoRA >>>\")\n\n lora_dict = {}\n with safe_open(lora_path, framework='pt', device='cpu') as file:\n for k in file.keys():\n lora_dict[k] = file.get_tensor(k)\n unet, text_encoder = convert_lora_model_level(\n lora_dict, unet, text_encoder, alpha=lora_alpha)\n\n print(\" <<< Loaded LoRA <<<\")\n\n # move model to device\n device = torch.device('cuda')\n unet_dtype = torch.float16\n tenc_dtype = torch.float16\n vae_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32\n\n unet = unet.to(device=device, dtype=unet_dtype)\n text_encoder = text_encoder.to(device=device, dtype=tenc_dtype)\n vae = vae.to(device=device, dtype=vae_dtype)\n print(f'Set Unet to {unet_dtype}')\n print(f'Set text encoder to {tenc_dtype}')\n print(f'Set vae to {vae_dtype}')\n\n if is_xformers_available():\n unet.enable_xformers_memory_efficient_attention()\n\n pipeline = cls(unet=unet,\n vae=vae,\n tokenizer=tokenizer,\n text_encoder=text_encoder,\n scheduler=noise_scheduler)\n\n # ip_adapter_path = 'h94/IP-Adapter'\n if ip_adapter_path and ip_adapter_scale > 0:\n ip_adapter_name = 'ip-adapter_sd15.bin'\n # only online repo need subfolder\n if not osp.isdir(ip_adapter_path):\n subfolder = 'models'\n else:\n subfolder = ''\n pipeline.load_ip_adapter(ip_adapter_path, subfolder, ip_adapter_name)\n pipeline.set_ip_adapter_scale(ip_adapter_scale)\n pipeline.use_ip_adapter = True\n print(f'Load IP-Adapter, scale: {ip_adapter_scale}')\n\n # text_inversion_path = './models/TextualInversion/easynegative.safetensors'\n # if text_inversion_path:\n # pipeline.load_textual_inversion(text_inversion_path, 'easynegative')\n\n return pipeline\n\n def enable_vae_slicing(self):\n self.vae.enable_slicing()\n\n def disable_vae_slicing(self):\n 
self.vae.disable_slicing()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @property\n def _execution_device(self):\n if self.device != torch.device(\"meta\") or not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt):\n batch_size = len(prompt) if isinstance(prompt, list) else 1\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):\n removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n text_embeddings = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n text_embeddings = text_embeddings[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n uncond_embeddings = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n uncond_embeddings = uncond_embeddings[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n return text_embeddings\n\n def decode_latents(self, latents):\n video_length = latents.shape[2]\n latents = 1 / 0.18215 * latents\n latents = rearrange(latents, \"b c f h w -> (b f) c h w\")\n # video = self.vae.decode(latents).sample\n video = []\n for frame_idx in tqdm(range(latents.shape[0])):\n video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample)\n video = torch.cat(video)\n video = rearrange(video, \"(b f) c h w -> b c f h w\", f=video_length)\n video = (video / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n video = video.cpu().float().numpy()\n return video\n\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(self, prompt, height, width, callback_steps):\n if not isinstance(prompt, str) and not isinstance(prompt, list):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n def get_timesteps(self, num_inference_steps, strength, device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n t_start = 
max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start:]\n\n return timesteps, num_inference_steps - t_start\n\n def prepare_latents(self, add_noise_time_step, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n if latents is None:\n rand_device = \"cpu\" if device.type == \"mps\" else device\n\n if isinstance(generator, list):\n shape = shape\n # shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)\n else:\n if latents.shape != shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {shape}\")\n latents = latents.to(device)\n\n return latents\n\n def encode_image(self, image, device, num_images_per_prompt):\n \"\"\"Encode image for ip-adapter. Copied from\n https://github.com/huggingface/diffusers/blob/f9487783228cd500a21555da3346db40e8f05992/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L492-L514 # noqa\n \"\"\"\n dtype = next(self.image_encoder.parameters()).dtype\n\n if not isinstance(image, torch.Tensor):\n image = self.feature_extractor(image, return_tensors=\"pt\").pixel_values\n\n image = image.to(device=device, dtype=dtype)\n image_embeds = self.image_encoder(image).image_embeds\n image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)\n\n uncond_image_embeds = torch.zeros_like(image_embeds)\n return image_embeds, uncond_image_embeds\n\n @torch.no_grad()\n def __call__(\n self,\n image: np.ndarray,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n global_inf_num: int = 0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n\n cond_frame: int = 0,\n mask_sim_template_idx: int = 0,\n ip_adapter_scale: float = 0,\n strength: float = 1,\n progress_fn=None,\n **kwargs,\n ):\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n assert strength > 0 and strength <= 1, (\n f'\"strength\" for img2vid must in (0, 1]. But receive {strength}.')\n\n # Check inputs. 
Raise error if not correct\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n\n if negative_prompt is None:\n negative_prompt = DEFAULT_N_PROMPT\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size\n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n #timesteps = self.scheduler.timesteps\n timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)\n latent_timestep = timesteps[:1].repeat(batch_size)\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n latent_timestep,\n batch_size * num_videos_per_prompt,\n 4,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n\n shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n raw_image = image.copy()\n image = torch.from_numpy(image)[None, ...].permute(0, 3, 1, 2)\n image = image / 255 # [0, 1]\n image = image * 2 - 1 # [-1, 1]\n image = image.to(device=device, dtype=self.vae.dtype)\n\n if isinstance(generator, list):\n image_latent = [\n self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size)\n ]\n image_latent = torch.cat(image_latent, dim=0)\n else:\n image_latent = self.vae.encode(image).latent_dist.sample(generator)\n\n image_latent = image_latent.to(device=device, dtype=self.unet.dtype)\n image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]])\n image_latent_padding = image_latent.clone() * 0.18215\n mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device=device, dtype=self.unet.dtype)\n\n # prepare mask\n mask_coef = prepare_mask_coef_by_statistics(video_length, cond_frame, mask_sim_template_idx)\n\n masked_image = torch.zeros(shape[0], 4, shape[2], shape[3], shape[4]).to(device=device, dtype=self.unet.dtype)\n for f in range(video_length):\n mask[:,:,f,:,:] = mask_coef[f]\n masked_image[:,:,f,:,:] = image_latent_padding.clone()\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask\n masked_image = torch.cat([masked_image] * 2) if do_classifier_free_guidance else masked_image\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n\n # prepare for ip-adapter\n if self.use_ip_adapter:\n image_embeds, neg_image_embeds = self.encode_image(raw_image, device, num_videos_per_prompt)\n image_embeds = torch.cat([neg_image_embeds, image_embeds])\n image_embeds = image_embeds.to(device=device, dtype=self.unet.dtype)\n\n 
self.set_ip_adapter_scale(ip_adapter_scale)\n print(f'Set IP-Adapter Scale as {ip_adapter_scale}')\n\n else:\n\n image_embeds = None\n\n # prepare for latents if strength < 1, add convert gaussian latent to masked_img and add noise\n if strength < 1:\n noise = torch.randn_like(latents)\n latents = self.scheduler.add_noise(masked_image[0], noise, timesteps[0])\n print(latents.shape)\n\n if progress_fn is None:\n progress_bar = tqdm(timesteps)\n terminal_pbar = None\n else:\n progress_bar = progress_fn.tqdm(timesteps)\n terminal_pbar = tqdm(total=len(timesteps))\n\n # with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(progress_bar):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n mask,\n masked_image,\n t,\n encoder_hidden_states=text_embeddings,\n image_embeds=image_embeds\n )['sample']\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n if terminal_pbar is not None:\n terminal_pbar.update(1)\n\n # Post-processing\n video = self.decode_latents(latents.to(device, dtype=self.vae.dtype))\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n return AnimationPipelineOutput(videos=video)"
},
{
"identifier": "save_videos_grid",
"path": "animatediff/utils/util.py",
"snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = torch.clamp((x * 255), 0, 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)"
}
] | import os
import os.path as osp
import numpy as np
import torch
from glob import glob
from omegaconf import OmegaConf
from PIL import Image
from cog import BasePredictor, Input, Path
from animatediff.pipelines import I2VPipeline
from animatediff.utils.util import save_videos_grid | 8,363 | # Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
N_PROMPT = (
"wrong white balance, dark, sketches,worst quality,low quality, "
"deformed, distorted, disfigured, bad eyes, wrong lips, "
"weird mouth, bad teeth, mutated hands and fingers, bad anatomy,"
"wrong anatomy, amputation, extra limb, missing limb, "
"floating,limbs, disconnected limbs, mutation, ugly, disgusting, "
"bad_pictures, negative_hand-neg"
)
BASE_CONFIG = "example/config/base.yaml"
STYLE_CONFIG_LIST = {
"realistic": "example/replicate/1-realistic.yaml",
"3d_cartoon": "example/replicate/3-3d.yaml",
}
PIA_PATH = "models/PIA"
VAE_PATH = "models/VAE"
DreamBooth_LoRA_PATH = "models/DreamBooth_LoRA"
STABLE_DIFFUSION_PATH = "models/StableDiffusion"
class Predictor(BasePredictor):
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
self.ip_adapter_dir = (
"models/IP_Adapter/h94/IP-Adapter/models" # cached h94/IP-Adapter
)
self.inference_config = OmegaConf.load("example/config/base.yaml")
self.stable_diffusion_dir = self.inference_config.pretrained_model_path
self.pia_path = self.inference_config.generate.model_path
self.style_configs = {
k: OmegaConf.load(v) for k, v in STYLE_CONFIG_LIST.items()
}
self.pipeline_dict = self.load_model_list()
def load_model_list(self):
pipeline_dict = dict()
for style, cfg in self.style_configs.items():
print(f"Loading {style}")
dreambooth_path = cfg.get("dreambooth", "none")
if dreambooth_path and dreambooth_path.upper() != "NONE":
dreambooth_path = osp.join(DreamBooth_LoRA_PATH, dreambooth_path)
lora_path = cfg.get("lora", None)
if lora_path is not None:
lora_path = osp.join(DreamBooth_LoRA_PATH, lora_path)
lora_alpha = cfg.get("lora_alpha", 0.0)
vae_path = cfg.get("vae", None)
if vae_path is not None:
vae_path = osp.join(VAE_PATH, vae_path)
| # Prediction interface for Cog ⚙️
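# --- Editorial example (not part of the source record) ----------------------
# The gold continuation recorded for this example is
#     pipeline_dict[style] = I2VPipeline.build_pipeline(
# i.e. load_model_list() hands the per-style paths it just resolved to the
# classmethod whose signature appears in the I2VPipeline context snippet
# (base_cfg, base_model, unet_path, dreambooth_path, lora_path, lora_alpha,
# vae_path, ip_adapter_path, ip_adapter_scale, ...). A hedged sketch of such
# a call follows; the unet checkpoint filename and the ip_adapter_scale value
# are illustrative assumptions, not taken from the source file.
pipeline_dict[style] = I2VPipeline.build_pipeline(
    self.inference_config,                     # base_cfg (OmegaConf)
    STABLE_DIFFUSION_PATH,                     # base_model directory
    unet_path=osp.join(PIA_PATH, "pia.ckpt"),  # assumed PIA unet filename
    dreambooth_path=dreambooth_path,
    lora_path=lora_path,
    lora_alpha=lora_alpha,
    vae_path=vae_path,
    ip_adapter_path=self.ip_adapter_dir,
    ip_adapter_scale=0.1,                      # assumed scale
)
# -----------------------------------------------------------------------------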
# https://github.com/replicate/cog/blob/main/docs/python.md
N_PROMPT = (
"wrong white balance, dark, sketches,worst quality,low quality, "
"deformed, distorted, disfigured, bad eyes, wrong lips, "
"weird mouth, bad teeth, mutated hands and fingers, bad anatomy,"
"wrong anatomy, amputation, extra limb, missing limb, "
"floating,limbs, disconnected limbs, mutation, ugly, disgusting, "
"bad_pictures, negative_hand-neg"
)
BASE_CONFIG = "example/config/base.yaml"
STYLE_CONFIG_LIST = {
"realistic": "example/replicate/1-realistic.yaml",
"3d_cartoon": "example/replicate/3-3d.yaml",
}
PIA_PATH = "models/PIA"
VAE_PATH = "models/VAE"
DreamBooth_LoRA_PATH = "models/DreamBooth_LoRA"
STABLE_DIFFUSION_PATH = "models/StableDiffusion"
class Predictor(BasePredictor):
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
self.ip_adapter_dir = (
"models/IP_Adapter/h94/IP-Adapter/models" # cached h94/IP-Adapter
)
self.inference_config = OmegaConf.load("example/config/base.yaml")
self.stable_diffusion_dir = self.inference_config.pretrained_model_path
self.pia_path = self.inference_config.generate.model_path
self.style_configs = {
k: OmegaConf.load(v) for k, v in STYLE_CONFIG_LIST.items()
}
self.pipeline_dict = self.load_model_list()
def load_model_list(self):
pipeline_dict = dict()
for style, cfg in self.style_configs.items():
print(f"Loading {style}")
dreambooth_path = cfg.get("dreambooth", "none")
if dreambooth_path and dreambooth_path.upper() != "NONE":
dreambooth_path = osp.join(DreamBooth_LoRA_PATH, dreambooth_path)
lora_path = cfg.get("lora", None)
if lora_path is not None:
lora_path = osp.join(DreamBooth_LoRA_PATH, lora_path)
lora_alpha = cfg.get("lora_alpha", 0.0)
vae_path = cfg.get("vae", None)
if vae_path is not None:
vae_path = osp.join(VAE_PATH, vae_path)
| pipeline_dict[style] = I2VPipeline.build_pipeline( | 0 | 2023-12-21 03:29:34+00:00 | 12k |
xinghaochen/TinySAM | tinysam/hierarchical_mask_generator.py | [
{
"identifier": "Sam",
"path": "tinysam/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "SamPredictor",
"path": "tinysam/predictor.py",
"snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n #import pdb; pdb.set_trace()\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form 1xHxW, where\n for SAM, H=W=256.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. 
Masks returned by a previous iteration of the\n predict method do not need further transformation.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None"
},
{
"identifier": "MaskData",
"path": "tinysam/utils/amg.py",
"snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()"
},
{
"identifier": "area_from_rle",
"path": "tinysam/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])"
},
{
"identifier": "batch_iterator",
"path": "tinysam/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]"
},
{
"identifier": "batched_mask_to_box",
"path": "tinysam/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "tinysam/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh"
},
{
"identifier": "build_all_layer_point_grids",
"path": "tinysam/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer"
},
{
"identifier": "calculate_stability_score",
"path": "tinysam/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
},
{
"identifier": "coco_encode_rle",
"path": "tinysam/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle"
},
{
"identifier": "generate_crop_boxes",
"path": "tinysam/utils/amg.py",
"snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs"
},
{
"identifier": "is_box_near_crop_edge",
"path": "tinysam/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "tinysam/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out"
},
{
"identifier": "remove_small_regions",
"path": "tinysam/utils/amg.py",
"snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True"
},
{
"identifier": "rle_to_mask",
"path": "tinysam/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "tinysam/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset"
},
{
"identifier": "uncrop_masks",
"path": "tinysam/utils/amg.py",
"snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)"
},
{
"identifier": "uncrop_points",
"path": "tinysam/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset"
}
] | import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401 | 9,084 | # Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
class SamHierarchicalMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
high_score_thresh: float = 8.5,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
high_score_thresh (float): A filtering threshold in [-inf,inf], used to identify
the unmasked area for the next generation.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
| # Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
class SamHierarchicalMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
high_score_thresh: float = 8.5,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
high_score_thresh (float): A filtering threshold in [-inf,inf], used to identify
the unmasked area for the next generation.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None: | self.point_grids = build_all_layer_point_grids( | 7 | 2023-12-19 11:25:54+00:00 | 12k |
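Aside (illustrative, not part of the dataset record above): the calculate_stability_score helper quoted in the tinysam/utils/amg.py context scores a mask by the IoU between its binarizations at a raised and a lowered cutoff. The sketch below re-implements that idea in a self-contained way and runs it on random logits; the function name stability_score, the 4x256x256 shape, and the threshold/offset values are assumptions chosen for the demo, not values taken from the record.

import torch

def stability_score(mask_logits: torch.Tensor, mask_threshold: float, offset: float) -> torch.Tensor:
    # IoU between the binary masks obtained at (threshold + offset) and (threshold - offset).
    # Because the high-threshold mask is contained in the low-threshold mask,
    # the intersection is simply the area of the high-threshold mask.
    intersections = (mask_logits > (mask_threshold + offset)).sum(dim=(-2, -1)).float()
    unions = (mask_logits > (mask_threshold - offset)).sum(dim=(-2, -1)).float()
    return intersections / unions

if __name__ == "__main__":
    logits = torch.randn(4, 256, 256)  # four hypothetical low-resolution mask logit maps
    scores = stability_score(logits, mask_threshold=0.0, offset=1.0)
    print(scores)  # values close to 1.0 indicate masks that barely change with the cutoff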
dcharatan/pixelsplat | src/model/encoder/visualization/encoder_visualizer_epipolar.py | [
{
"identifier": "BatchedViews",
"path": "src/dataset/types.py",
"snippet": "class BatchedViews(TypedDict, total=False):\n extrinsics: Float[Tensor, \"batch _ 4 4\"] # batch view 4 4\n intrinsics: Float[Tensor, \"batch _ 3 3\"] # batch view 3 3\n image: Float[Tensor, \"batch _ _ _ _\"] # batch view channel height width\n near: Float[Tensor, \"batch _\"] # batch view\n far: Float[Tensor, \"batch _\"] # batch view\n index: Int64[Tensor, \"batch _\"] # batch view"
},
{
"identifier": "generate_heterogeneous_index",
"path": "src/misc/heterogeneous_pairings.py",
"snippet": "def generate_heterogeneous_index(\n n: int,\n device: torch.device = torch.device(\"cpu\"),\n) -> tuple[Index, Index]:\n \"\"\"Generate indices for all pairs except self-pairs.\"\"\"\n arange = torch.arange(n, device=device)\n\n # Generate an index that represents the item itself.\n index_self = repeat(arange, \"h -> h w\", w=n - 1)\n\n # Generate an index that represents the other items.\n index_other = repeat(arange, \"w -> h w\", h=n).clone()\n index_other += torch.ones((n, n), device=device, dtype=torch.int64).triu()\n index_other = index_other[:, :-1]\n\n return index_self, index_other"
},
{
"identifier": "add_label",
"path": "src/visualization/annotation.py",
"snippet": "def add_label(\n image: Float[Tensor, \"3 width height\"],\n label: str,\n font: Path = Path(\"assets/Inter-Regular.otf\"),\n font_size: int = 24,\n) -> Float[Tensor, \"3 width_with_label height_with_label\"]:\n return vcat(\n draw_label(label, font, font_size, image.device),\n image,\n align=\"left\",\n gap=4,\n )"
},
{
"identifier": "apply_color_map",
"path": "src/visualization/color_map.py",
"snippet": "def apply_color_map(\n x: Float[Tensor, \" *batch\"],\n color_map: str = \"inferno\",\n) -> Float[Tensor, \"*batch 3\"]:\n cmap = cm.get_cmap(color_map)\n\n # Convert to NumPy so that Matplotlib color maps can be used.\n mapped = cmap(x.detach().clip(min=0, max=1).cpu().numpy())[..., :3]\n\n # Convert back to the original format.\n return torch.tensor(mapped, device=x.device, dtype=torch.float32)"
},
{
"identifier": "apply_color_map_to_image",
"path": "src/visualization/color_map.py",
"snippet": "def apply_color_map_to_image(\n image: Float[Tensor, \"*batch height width\"],\n color_map: str = \"inferno\",\n) -> Float[Tensor, \"*batch 3 height with\"]:\n image = apply_color_map(image, color_map)\n return rearrange(image, \"... h w c -> ... c h w\")"
},
{
"identifier": "get_distinct_color",
"path": "src/visualization/colors.py",
"snippet": "def get_distinct_color(index: int) -> tuple[float, float, float]:\n hex = DISTINCT_COLORS[index % len(DISTINCT_COLORS)]\n return tuple(x / 255 for x in ImageColor.getcolor(hex, \"RGB\"))"
},
{
"identifier": "draw_lines",
"path": "src/visualization/drawing/lines.py",
"snippet": "def draw_lines(\n image: Float[Tensor, \"3 height width\"],\n start: Vector,\n end: Vector,\n color: Vector,\n width: Scalar,\n cap: Literal[\"butt\", \"round\", \"square\"] = \"round\",\n num_msaa_passes: int = 1,\n x_range: Optional[Pair] = None,\n y_range: Optional[Pair] = None,\n) -> Float[Tensor, \"3 height width\"]:\n device = image.device\n start = sanitize_vector(start, 2, device)\n end = sanitize_vector(end, 2, device)\n color = sanitize_vector(color, 3, device)\n width = sanitize_scalar(width, device)\n (num_lines,) = torch.broadcast_shapes(\n start.shape[0],\n end.shape[0],\n color.shape[0],\n width.shape,\n )\n\n # Convert world-space points to pixel space.\n _, h, w = image.shape\n world_to_pixel, _ = generate_conversions((h, w), device, x_range, y_range)\n start = world_to_pixel(start)\n end = world_to_pixel(end)\n\n def color_function(\n xy: Float[Tensor, \"point 2\"],\n ) -> Float[Tensor, \"point 4\"]:\n # Define a vector between the start and end points.\n delta = end - start\n delta_norm = delta.norm(dim=-1, keepdim=True)\n u_delta = delta / delta_norm\n\n # Define a vector between each sample and the start point.\n indicator = xy - start[:, None]\n\n # Determine whether each sample is inside the line in the parallel direction.\n extra = 0.5 * width[:, None] if cap == \"square\" else 0\n parallel = einsum(u_delta, indicator, \"l xy, l s xy -> l s\")\n parallel_inside_line = (parallel <= delta_norm + extra) & (parallel > -extra)\n\n # Determine whether each sample is inside the line perpendicularly.\n perpendicular = indicator - parallel[..., None] * u_delta[:, None]\n perpendicular_inside_line = perpendicular.norm(dim=-1) < 0.5 * width[:, None]\n\n inside_line = parallel_inside_line & perpendicular_inside_line\n\n # Compute round caps.\n if cap == \"round\":\n near_start = indicator.norm(dim=-1) < 0.5 * width[:, None]\n inside_line |= near_start\n end_indicator = indicator = xy - end[:, None]\n near_end = end_indicator.norm(dim=-1) < 0.5 * width[:, None]\n inside_line |= near_end\n\n # Determine the sample's color.\n selectable_color = color.broadcast_to((num_lines, 3))\n arrangement = inside_line * torch.arange(num_lines, device=device)[:, None]\n top_color = selectable_color.gather(\n dim=0,\n index=repeat(arrangement.argmax(dim=0), \"s -> s c\", c=3),\n )\n rgba = torch.cat((top_color, inside_line.any(dim=0).float()[:, None]), dim=-1)\n\n return rgba\n\n return render_over_image(image, color_function, device, num_passes=num_msaa_passes)"
},
{
"identifier": "draw_points",
"path": "src/visualization/drawing/points.py",
"snippet": "def draw_points(\n image: Float[Tensor, \"3 height width\"],\n points: Vector,\n color: Vector = [1, 1, 1],\n radius: Scalar = 1,\n inner_radius: Scalar = 0,\n num_msaa_passes: int = 1,\n x_range: Optional[Pair] = None,\n y_range: Optional[Pair] = None,\n) -> Float[Tensor, \"3 height width\"]:\n device = image.device\n points = sanitize_vector(points, 2, device)\n color = sanitize_vector(color, 3, device)\n radius = sanitize_scalar(radius, device)\n inner_radius = sanitize_scalar(inner_radius, device)\n (num_points,) = torch.broadcast_shapes(\n points.shape[0],\n color.shape[0],\n radius.shape,\n inner_radius.shape,\n )\n\n # Convert world-space points to pixel space.\n _, h, w = image.shape\n world_to_pixel, _ = generate_conversions((h, w), device, x_range, y_range)\n points = world_to_pixel(points)\n\n def color_function(\n xy: Float[Tensor, \"point 2\"],\n ) -> Float[Tensor, \"point 4\"]:\n # Define a vector between the start and end points.\n delta = xy[:, None] - points[None]\n delta_norm = delta.norm(dim=-1)\n mask = (delta_norm >= inner_radius[None]) & (delta_norm <= radius[None])\n\n # Determine the sample's color.\n selectable_color = color.broadcast_to((num_points, 3))\n arrangement = mask * torch.arange(num_points, device=device)\n top_color = selectable_color.gather(\n dim=0,\n index=repeat(arrangement.argmax(dim=1), \"s -> s c\", c=3),\n )\n rgba = torch.cat((top_color, mask.any(dim=1).float()[:, None]), dim=-1)\n\n return rgba\n\n return render_over_image(image, color_function, device, num_passes=num_msaa_passes)"
},
{
"identifier": "add_border",
"path": "src/visualization/layout.py",
"snippet": "def add_border(\n image: Float[Tensor, \"channel height width\"],\n border: int = 8,\n color: Color = 1,\n) -> Float[Tensor, \"channel new_height new_width\"]:\n color = _sanitize_color(color).to(image)\n c, h, w = image.shape\n result = torch.empty(\n (c, h + 2 * border, w + 2 * border), dtype=torch.float32, device=image.device\n )\n result[:] = color[:, None, None]\n result[:, border : h + border, border : w + border] = image\n return result"
},
{
"identifier": "hcat",
"path": "src/visualization/layout.py",
"snippet": "def hcat(\n *images: Iterable[Float[Tensor, \"channel _ _\"]],\n align: Literal[\"start\", \"center\", \"end\", \"top\", \"bottom\"] = \"start\",\n gap: int = 8,\n gap_color: Color = 1,\n):\n \"\"\"Shorthand for a horizontal linear concatenation.\"\"\"\n return cat(\n \"horizontal\",\n *images,\n align={\n \"start\": \"start\",\n \"center\": \"center\",\n \"end\": \"end\",\n \"top\": \"start\",\n \"bottom\": \"end\",\n }[align],\n gap=gap,\n gap_color=gap_color,\n )"
},
{
"identifier": "vcat",
"path": "src/visualization/layout.py",
"snippet": "def vcat(\n *images: Iterable[Float[Tensor, \"channel _ _\"]],\n align: Literal[\"start\", \"center\", \"end\", \"left\", \"right\"] = \"start\",\n gap: int = 8,\n gap_color: Color = 1,\n):\n \"\"\"Shorthand for a horizontal linear concatenation.\"\"\"\n return cat(\n \"vertical\",\n *images,\n align={\n \"start\": \"start\",\n \"center\": \"center\",\n \"end\": \"end\",\n \"left\": \"start\",\n \"right\": \"end\",\n }[align],\n gap=gap,\n gap_color=gap_color,\n )"
},
{
"identifier": "export_ply",
"path": "src/model/ply_export.py",
"snippet": "def export_ply(\n extrinsics: Float[Tensor, \"4 4\"],\n means: Float[Tensor, \"gaussian 3\"],\n scales: Float[Tensor, \"gaussian 3\"],\n rotations: Float[Tensor, \"gaussian 4\"],\n harmonics: Float[Tensor, \"gaussian 3 d_sh\"],\n opacities: Float[Tensor, \" gaussian\"],\n path: Path,\n):\n # Shift the scene so that the median Gaussian is at the origin.\n means = means - means.median(dim=0).values\n\n # Rescale the scene so that most Gaussians are within range [-1, 1].\n scale_factor = means.abs().quantile(0.95, dim=0).max()\n means = means / scale_factor\n scales = scales / scale_factor\n\n # Define a rotation that makes +Z be the world up vector.\n rotation = [\n [0, 0, 1],\n [-1, 0, 0],\n [0, -1, 0],\n ]\n rotation = torch.tensor(rotation, dtype=torch.float32, device=means.device)\n\n # The Polycam viewer seems to start at a 45 degree angle. Since we want to be\n # looking directly at the object, we compose a 45 degree rotation onto the above\n # rotation.\n adjustment = torch.tensor(\n R.from_rotvec([0, 0, -45], True).as_matrix(),\n dtype=torch.float32,\n device=means.device,\n )\n rotation = adjustment @ rotation\n\n # We also want to see the scene in camera space (as the default view). We therefore\n # compose the w2c rotation onto the above rotation.\n rotation = rotation @ extrinsics[:3, :3].inverse()\n\n # Apply the rotation to the means (Gaussian positions).\n means = einsum(rotation, means, \"i j, ... j -> ... i\")\n\n # Apply the rotation to the Gaussian rotations.\n rotations = R.from_quat(rotations.detach().cpu().numpy()).as_matrix()\n rotations = rotation.detach().cpu().numpy() @ rotations\n rotations = R.from_matrix(rotations).as_quat()\n x, y, z, w = rearrange(rotations, \"g xyzw -> xyzw g\")\n rotations = np.stack((w, x, y, z), axis=-1)\n\n # Since our axes are swizzled for the spherical harmonics, we only export the DC\n # band.\n harmonics_view_invariant = harmonics[..., 0]\n\n dtype_full = [(attribute, \"f4\") for attribute in construct_list_of_attributes(0)]\n elements = np.empty(means.shape[0], dtype=dtype_full)\n attributes = (\n means.detach().cpu().numpy(),\n torch.zeros_like(means).detach().cpu().numpy(),\n harmonics_view_invariant.detach().cpu().contiguous().numpy(),\n opacities[..., None].detach().cpu().numpy(),\n scales.log().detach().cpu().numpy(),\n rotations,\n )\n attributes = np.concatenate(attributes, axis=1)\n elements[:] = list(map(tuple, attributes))\n path.parent.mkdir(exist_ok=True, parents=True)\n PlyData([PlyElement.describe(elements, \"vertex\")]).write(path)"
},
{
"identifier": "EncoderEpipolar",
"path": "src/model/encoder/encoder_epipolar.py",
"snippet": "class EncoderEpipolar(Encoder[EncoderEpipolarCfg]):\n backbone: Backbone\n backbone_projection: nn.Sequential\n epipolar_transformer: EpipolarTransformer | None\n depth_predictor: DepthPredictorMonocular\n to_gaussians: nn.Sequential\n gaussian_adapter: GaussianAdapter\n high_resolution_skip: nn.Sequential\n\n def __init__(self, cfg: EncoderEpipolarCfg) -> None:\n super().__init__(cfg)\n\n self.backbone = get_backbone(cfg.backbone, 3)\n self.backbone_projection = nn.Sequential(\n nn.ReLU(),\n nn.Linear(self.backbone.d_out, cfg.d_feature),\n )\n if cfg.use_epipolar_transformer:\n self.epipolar_transformer = EpipolarTransformer(\n cfg.epipolar_transformer,\n cfg.d_feature,\n )\n else:\n self.epipolar_transformer = None\n self.depth_predictor = DepthPredictorMonocular(\n cfg.d_feature,\n cfg.num_monocular_samples,\n cfg.num_surfaces,\n cfg.use_transmittance,\n )\n self.gaussian_adapter = GaussianAdapter(cfg.gaussian_adapter)\n if cfg.predict_opacity:\n self.to_opacity = nn.Sequential(\n nn.ReLU(),\n nn.Linear(cfg.d_feature, 1),\n nn.Sigmoid(),\n )\n self.to_gaussians = nn.Sequential(\n nn.ReLU(),\n nn.Linear(\n cfg.d_feature,\n cfg.num_surfaces * (2 + self.gaussian_adapter.d_in),\n ),\n )\n self.high_resolution_skip = nn.Sequential(\n nn.Conv2d(3, cfg.d_feature, 7, 1, 3),\n nn.ReLU(),\n )\n\n def map_pdf_to_opacity(\n self,\n pdf: Float[Tensor, \" *batch\"],\n global_step: int,\n ) -> Float[Tensor, \" *batch\"]:\n # https://www.desmos.com/calculator/opvwti3ba9\n\n # Figure out the exponent.\n cfg = self.cfg.opacity_mapping\n x = cfg.initial + min(global_step / cfg.warm_up, 1) * (cfg.final - cfg.initial)\n exponent = 2**x\n\n # Map the probability density to an opacity.\n return 0.5 * (1 - (1 - pdf) ** exponent + pdf ** (1 / exponent))\n\n def forward(\n self,\n context: dict,\n global_step: int,\n deterministic: bool = False,\n visualization_dump: Optional[dict] = None,\n ) -> Gaussians:\n device = context[\"image\"].device\n b, v, _, h, w = context[\"image\"].shape\n\n # Encode the context images.\n features = self.backbone(context)\n features = rearrange(features, \"b v c h w -> b v h w c\")\n features = self.backbone_projection(features)\n features = rearrange(features, \"b v h w c -> b v c h w\")\n\n # Run the epipolar transformer.\n if self.cfg.use_epipolar_transformer:\n features, sampling = self.epipolar_transformer(\n features,\n context[\"extrinsics\"],\n context[\"intrinsics\"],\n context[\"near\"],\n context[\"far\"],\n )\n\n # Add the high-resolution skip connection.\n skip = rearrange(context[\"image\"], \"b v c h w -> (b v) c h w\")\n skip = self.high_resolution_skip(skip)\n features = features + rearrange(skip, \"(b v) c h w -> b v c h w\", b=b, v=v)\n\n # Sample depths from the resulting features.\n features = rearrange(features, \"b v c h w -> b v (h w) c\")\n depths, densities = self.depth_predictor.forward(\n features,\n context[\"near\"],\n context[\"far\"],\n deterministic,\n 1 if deterministic else self.cfg.gaussians_per_pixel,\n )\n\n # Convert the features and depths into Gaussians.\n xy_ray, _ = sample_image_grid((h, w), device)\n xy_ray = rearrange(xy_ray, \"h w xy -> (h w) () xy\")\n gaussians = rearrange(\n self.to_gaussians(features),\n \"... (srf c) -> ... 
srf c\",\n srf=self.cfg.num_surfaces,\n )\n offset_xy = gaussians[..., :2].sigmoid()\n pixel_size = 1 / torch.tensor((w, h), dtype=torch.float32, device=device)\n xy_ray = xy_ray + (offset_xy - 0.5) * pixel_size\n gpp = self.cfg.gaussians_per_pixel\n gaussians = self.gaussian_adapter.forward(\n rearrange(context[\"extrinsics\"], \"b v i j -> b v () () () i j\"),\n rearrange(context[\"intrinsics\"], \"b v i j -> b v () () () i j\"),\n rearrange(xy_ray, \"b v r srf xy -> b v r srf () xy\"),\n depths,\n self.map_pdf_to_opacity(densities, global_step) / gpp,\n rearrange(gaussians[..., 2:], \"b v r srf c -> b v r srf () c\"),\n (h, w),\n )\n\n # Dump visualizations if needed.\n if visualization_dump is not None:\n visualization_dump[\"depth\"] = rearrange(\n depths, \"b v (h w) srf s -> b v h w srf s\", h=h, w=w\n )\n visualization_dump[\"scales\"] = rearrange(\n gaussians.scales, \"b v r srf spp xyz -> b (v r srf spp) xyz\"\n )\n visualization_dump[\"rotations\"] = rearrange(\n gaussians.rotations, \"b v r srf spp xyzw -> b (v r srf spp) xyzw\"\n )\n if self.cfg.use_epipolar_transformer:\n visualization_dump[\"sampling\"] = sampling\n\n # Optionally apply a per-pixel opacity.\n opacity_multiplier = (\n rearrange(self.to_opacity(features), \"b v r () -> b v r () ()\")\n if self.cfg.predict_opacity\n else 1\n )\n\n return Gaussians(\n rearrange(\n gaussians.means,\n \"b v r srf spp xyz -> b (v r srf spp) xyz\",\n ),\n rearrange(\n gaussians.covariances,\n \"b v r srf spp i j -> b (v r srf spp) i j\",\n ),\n rearrange(\n gaussians.harmonics,\n \"b v r srf spp c d_sh -> b (v r srf spp) c d_sh\",\n ),\n rearrange(\n opacity_multiplier * gaussians.opacities,\n \"b v r srf spp -> b (v r srf spp)\",\n ),\n )\n\n def get_data_shim(self) -> DataShim:\n def data_shim(batch: BatchedExample) -> BatchedExample:\n batch = apply_patch_shim(\n batch,\n patch_size=self.cfg.epipolar_transformer.self_attention.patch_size\n * self.cfg.epipolar_transformer.downscale,\n )\n\n if self.cfg.apply_bounds_shim:\n _, _, _, h, w = batch[\"context\"][\"image\"].shape\n near_disparity = self.cfg.near_disparity * min(h, w)\n batch = apply_bounds_shim(batch, near_disparity, 0.5)\n\n return batch\n\n return data_shim\n\n @property\n def sampler(self):\n # hack to make the visualizer work\n return self.epipolar_transformer.epipolar_sampler"
},
{
"identifier": "EpipolarSampling",
"path": "src/model/encoder/epipolar/epipolar_sampler.py",
"snippet": "class EpipolarSampling:\n features: Float[Tensor, \"batch view other_view ray sample channel\"]\n valid: Bool[Tensor, \"batch view other_view ray\"]\n xy_ray: Float[Tensor, \"batch view ray 2\"]\n xy_sample: Float[Tensor, \"batch view other_view ray sample 2\"]\n xy_sample_near: Float[Tensor, \"batch view other_view ray sample 2\"]\n xy_sample_far: Float[Tensor, \"batch view other_view ray sample 2\"]\n origins: Float[Tensor, \"batch view ray 3\"]\n directions: Float[Tensor, \"batch view ray 3\"]"
},
{
"identifier": "EncoderVisualizer",
"path": "src/model/encoder/visualization/encoder_visualizer.py",
"snippet": "class EncoderVisualizer(ABC, Generic[T_cfg, T_encoder]):\n cfg: T_cfg\n encoder: T_encoder\n\n def __init__(self, cfg: T_cfg, encoder: T_encoder) -> None:\n self.cfg = cfg\n self.encoder = encoder\n\n @abstractmethod\n def visualize(\n self,\n context: dict,\n global_step: int,\n ) -> dict[str, Float[Tensor, \"3 _ _\"]]:\n pass"
},
{
"identifier": "EncoderVisualizerEpipolarCfg",
"path": "src/model/encoder/visualization/encoder_visualizer_epipolar_cfg.py",
"snippet": "class EncoderVisualizerEpipolarCfg:\n num_samples: int\n min_resolution: int\n export_ply: bool"
}
] | from pathlib import Path
from random import randrange
from typing import Optional
from einops import rearrange, reduce, repeat
from jaxtyping import Bool, Float
from torch import Tensor
from ....dataset.types import BatchedViews
from ....misc.heterogeneous_pairings import generate_heterogeneous_index
from ....visualization.annotation import add_label
from ....visualization.color_map import apply_color_map, apply_color_map_to_image
from ....visualization.colors import get_distinct_color
from ....visualization.drawing.lines import draw_lines
from ....visualization.drawing.points import draw_points
from ....visualization.layout import add_border, hcat, vcat
from ...ply_export import export_ply
from ..encoder_epipolar import EncoderEpipolar
from ..epipolar.epipolar_sampler import EpipolarSampling
from .encoder_visualizer import EncoderVisualizer
from .encoder_visualizer_epipolar_cfg import EncoderVisualizerEpipolarCfg
import numpy as np
import torch
import wandb | 7,992 | attention, "l (b v r) hd () s -> l b v r hd s", b=b, v=v, r=r
)
attention = attention[:, rb, rv, rr, :, :]
num_layers, _, hd, _ = attention.shape
vis = []
for il in range(num_layers):
vis_layer = []
for ihd in range(hd):
# Create colors according to attention.
color = [get_distinct_color(i) for i, _ in enumerate(rr)]
color = torch.tensor(color, device=attention.device)
color = rearrange(color, "r c -> r () c")
attn = rearrange(attention[il, :, ihd], "r s -> r s ()")
color = rearrange(attn * color, "r s c -> (r s ) c")
# Draw the alternating bucket lines.
vis_layer_head = draw_lines(
context_images[rb, self.encoder.sampler.index_v[rv, rov]],
rearrange(
sampling.xy_sample_near[rb, rv, rov, rr], "r s xy -> (r s) xy"
),
rearrange(
sampling.xy_sample_far[rb, rv, rov, rr], "r s xy -> (r s) xy"
),
color,
3,
cap="butt",
x_range=(0, 1),
y_range=(0, 1),
)
vis_layer.append(vis_layer_head)
vis.append(add_label(vcat(*vis_layer), f"Layer {il}"))
vis = add_label(add_border(add_border(hcat(*vis)), 1, 0), "Keys & Values")
vis = add_border(hcat(add_label(ray_view), vis, align="top"))
return vis
def visualize_depth(
self,
context: BatchedViews,
multi_depth: Float[Tensor, "batch view height width surface spp"],
) -> Float[Tensor, "3 vis_width vis_height"]:
multi_vis = []
*_, srf, _ = multi_depth.shape
for i in range(srf):
depth = multi_depth[..., i, :]
depth = depth.mean(dim=-1)
# Compute relative depth and disparity.
near = rearrange(context["near"], "b v -> b v () ()")
far = rearrange(context["far"], "b v -> b v () ()")
relative_depth = (depth - near) / (far - near)
relative_disparity = 1 - (1 / depth - 1 / far) / (1 / near - 1 / far)
relative_depth = apply_color_map_to_image(relative_depth, "turbo")
relative_depth = vcat(*[hcat(*x) for x in relative_depth])
relative_depth = add_label(relative_depth, "Depth")
relative_disparity = apply_color_map_to_image(relative_disparity, "turbo")
relative_disparity = vcat(*[hcat(*x) for x in relative_disparity])
relative_disparity = add_label(relative_disparity, "Disparity")
multi_vis.append(add_border(hcat(relative_depth, relative_disparity)))
return add_border(vcat(*multi_vis))
def visualize_overlaps(
self,
context_images: Float[Tensor, "batch view 3 height width"],
sampling: EpipolarSampling,
is_monocular: Optional[Bool[Tensor, "batch view height width"]] = None,
) -> Float[Tensor, "3 vis_width vis_height"]:
device = context_images.device
b, v, _, h, w = context_images.shape
green = torch.tensor([0.235, 0.706, 0.294], device=device)[..., None, None]
rb = randrange(b)
valid = sampling.valid[rb].float()
ds = self.encoder.cfg.epipolar_transformer.downscale
valid = repeat(
valid,
"v ov (h w) -> v ov c (h rh) (w rw)",
c=3,
h=h // ds,
w=w // ds,
rh=ds,
rw=ds,
)
if is_monocular is not None:
is_monocular = is_monocular[rb].float()
is_monocular = repeat(is_monocular, "v h w -> v c h w", c=3, h=h, w=w)
# Select context images in grid.
context_images = context_images[rb]
index, _ = generate_heterogeneous_index(v)
valid = valid * (green + context_images[index]) / 2
vis = vcat(*(hcat(im, hcat(*v)) for im, v in zip(context_images, valid)))
vis = add_label(vis, "Context Overlaps")
if is_monocular is not None:
vis = hcat(vis, add_label(vcat(*is_monocular), "Monocular?"))
return add_border(vis)
def visualize_gaussians(
self,
context_images: Float[Tensor, "batch view 3 height width"],
opacities: Float[Tensor, "batch vrspp"],
covariances: Float[Tensor, "batch vrspp 3 3"],
colors: Float[Tensor, "batch vrspp 3"],
) -> Float[Tensor, "3 vis_height vis_width"]:
b, v, _, h, w = context_images.shape
rb = randrange(b)
context_images = context_images[rb]
opacities = repeat(
opacities[rb], "(v h w spp) -> spp v c h w", v=v, c=3, h=h, w=w
)
colors = rearrange(colors[rb], "(v h w spp) c -> spp v c h w", v=v, h=h, w=w)
# Color-map Gaussian covariances.
det = covariances[rb].det()
|
def box(
image: Float[Tensor, "3 height width"],
) -> Float[Tensor, "3 new_height new_width"]:
return add_border(add_border(image), 1, 0)
class EncoderVisualizerEpipolar(
EncoderVisualizer[EncoderVisualizerEpipolarCfg, EncoderEpipolar]
):
def visualize(
self,
context: BatchedViews,
global_step: int,
) -> dict[str, Float[Tensor, "3 _ _"]]:
# Short-circuit execution when ablating the epipolar transformer.
if self.encoder.epipolar_transformer is None:
return {}
visualization_dump = {}
softmax_weights = []
def hook(module, input, output):
softmax_weights.append(output)
# Register hooks to grab attention.
handles = [
layer[0].fn.attend.register_forward_hook(hook)
for layer in self.encoder.epipolar_transformer.transformer.layers
]
result = self.encoder.forward(
context,
global_step,
visualization_dump=visualization_dump,
deterministic=True,
)
# De-register hooks.
for handle in handles:
handle.remove()
softmax_weights = torch.stack(softmax_weights)
# Generate high-resolution context images that can be drawn on.
context_images = context["image"]
_, _, _, h, w = context_images.shape
length = min(h, w)
min_resolution = self.cfg.min_resolution
scale_multiplier = (min_resolution + length - 1) // length
if scale_multiplier > 1:
context_images = repeat(
context_images,
"b v c h w -> b v c (h rh) (w rw)",
rh=scale_multiplier,
rw=scale_multiplier,
)
# This is kind of hacky for now, since we're using it for short experiments.
if self.cfg.export_ply and wandb.run is not None:
name = wandb.run._name.split(" ")[0]
ply_path = Path(f"outputs/gaussians/{name}/{global_step:0>6}.ply")
export_ply(
context["extrinsics"][0, 0],
result.means[0],
visualization_dump["scales"][0],
visualization_dump["rotations"][0],
result.harmonics[0],
result.opacities[0],
ply_path,
)
return {
# "attention": self.visualize_attention(
# context_images,
# visualization_dump["sampling"],
# softmax_weights,
# ),
"epipolar_samples": self.visualize_epipolar_samples(
context_images,
visualization_dump["sampling"],
),
"epipolar_color_samples": self.visualize_epipolar_color_samples(
context_images,
context,
),
"gaussians": self.visualize_gaussians(
context["image"],
result.opacities,
result.covariances,
result.harmonics[..., 0], # Just visualize DC component.
),
"overlaps": self.visualize_overlaps(
context["image"],
visualization_dump["sampling"],
visualization_dump.get("is_monocular", None),
),
"depth": self.visualize_depth(
context,
visualization_dump["depth"],
),
}
def visualize_attention(
self,
context_images: Float[Tensor, "batch view 3 height width"],
sampling: EpipolarSampling,
attention: Float[Tensor, "layer bvr head 1 sample"],
) -> Float[Tensor, "3 vis_height vis_width"]:
device = context_images.device
# Pick a random batch element, view, and other view.
b, v, ov, r, s, _ = sampling.xy_sample.shape
rb = randrange(b)
rv = randrange(v)
rov = randrange(ov)
num_samples = self.cfg.num_samples
rr = np.random.choice(r, num_samples, replace=False)
rr = torch.tensor(rr, dtype=torch.int64, device=device)
# Visualize the rays in the ray view.
ray_view = draw_points(
context_images[rb, rv],
sampling.xy_ray[rb, rv, rr],
0,
radius=4,
x_range=(0, 1),
y_range=(0, 1),
)
ray_view = draw_points(
ray_view,
sampling.xy_ray[rb, rv, rr],
[get_distinct_color(i) for i, _ in enumerate(rr)],
radius=3,
x_range=(0, 1),
y_range=(0, 1),
)
# Visualize attention in the sample view.
attention = rearrange(
attention, "l (b v r) hd () s -> l b v r hd s", b=b, v=v, r=r
)
attention = attention[:, rb, rv, rr, :, :]
num_layers, _, hd, _ = attention.shape
vis = []
for il in range(num_layers):
vis_layer = []
for ihd in range(hd):
# Create colors according to attention.
color = [get_distinct_color(i) for i, _ in enumerate(rr)]
color = torch.tensor(color, device=attention.device)
color = rearrange(color, "r c -> r () c")
attn = rearrange(attention[il, :, ihd], "r s -> r s ()")
color = rearrange(attn * color, "r s c -> (r s ) c")
# Draw the alternating bucket lines.
vis_layer_head = draw_lines(
context_images[rb, self.encoder.sampler.index_v[rv, rov]],
rearrange(
sampling.xy_sample_near[rb, rv, rov, rr], "r s xy -> (r s) xy"
),
rearrange(
sampling.xy_sample_far[rb, rv, rov, rr], "r s xy -> (r s) xy"
),
color,
3,
cap="butt",
x_range=(0, 1),
y_range=(0, 1),
)
vis_layer.append(vis_layer_head)
vis.append(add_label(vcat(*vis_layer), f"Layer {il}"))
vis = add_label(add_border(add_border(hcat(*vis)), 1, 0), "Keys & Values")
vis = add_border(hcat(add_label(ray_view), vis, align="top"))
return vis
def visualize_depth(
self,
context: BatchedViews,
multi_depth: Float[Tensor, "batch view height width surface spp"],
) -> Float[Tensor, "3 vis_width vis_height"]:
multi_vis = []
*_, srf, _ = multi_depth.shape
for i in range(srf):
depth = multi_depth[..., i, :]
depth = depth.mean(dim=-1)
# Compute relative depth and disparity.
near = rearrange(context["near"], "b v -> b v () ()")
far = rearrange(context["far"], "b v -> b v () ()")
relative_depth = (depth - near) / (far - near)
relative_disparity = 1 - (1 / depth - 1 / far) / (1 / near - 1 / far)
relative_depth = apply_color_map_to_image(relative_depth, "turbo")
relative_depth = vcat(*[hcat(*x) for x in relative_depth])
relative_depth = add_label(relative_depth, "Depth")
relative_disparity = apply_color_map_to_image(relative_disparity, "turbo")
relative_disparity = vcat(*[hcat(*x) for x in relative_disparity])
relative_disparity = add_label(relative_disparity, "Disparity")
multi_vis.append(add_border(hcat(relative_depth, relative_disparity)))
return add_border(vcat(*multi_vis))
def visualize_overlaps(
self,
context_images: Float[Tensor, "batch view 3 height width"],
sampling: EpipolarSampling,
is_monocular: Optional[Bool[Tensor, "batch view height width"]] = None,
) -> Float[Tensor, "3 vis_width vis_height"]:
device = context_images.device
b, v, _, h, w = context_images.shape
green = torch.tensor([0.235, 0.706, 0.294], device=device)[..., None, None]
rb = randrange(b)
valid = sampling.valid[rb].float()
ds = self.encoder.cfg.epipolar_transformer.downscale
valid = repeat(
valid,
"v ov (h w) -> v ov c (h rh) (w rw)",
c=3,
h=h // ds,
w=w // ds,
rh=ds,
rw=ds,
)
if is_monocular is not None:
is_monocular = is_monocular[rb].float()
is_monocular = repeat(is_monocular, "v h w -> v c h w", c=3, h=h, w=w)
# Select context images in grid.
context_images = context_images[rb]
index, _ = generate_heterogeneous_index(v)
valid = valid * (green + context_images[index]) / 2
vis = vcat(*(hcat(im, hcat(*v)) for im, v in zip(context_images, valid)))
vis = add_label(vis, "Context Overlaps")
if is_monocular is not None:
vis = hcat(vis, add_label(vcat(*is_monocular), "Monocular?"))
return add_border(vis)
def visualize_gaussians(
self,
context_images: Float[Tensor, "batch view 3 height width"],
opacities: Float[Tensor, "batch vrspp"],
covariances: Float[Tensor, "batch vrspp 3 3"],
colors: Float[Tensor, "batch vrspp 3"],
) -> Float[Tensor, "3 vis_height vis_width"]:
b, v, _, h, w = context_images.shape
rb = randrange(b)
context_images = context_images[rb]
opacities = repeat(
opacities[rb], "(v h w spp) -> spp v c h w", v=v, c=3, h=h, w=w
)
colors = rearrange(colors[rb], "(v h w spp) c -> spp v c h w", v=v, h=h, w=w)
# Color-map Gaussian covariances.
det = covariances[rb].det() | det = apply_color_map(det / det.max(), "inferno") | 3 | 2023-12-20 19:45:59+00:00 | 12k |
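Aside (illustrative, not part of the dataset records): the generate_heterogeneous_index helper quoted in the pixelsplat context builds, for each of n views, the index of the view itself (repeated n - 1 times) and the indices of the other n - 1 views; visualize_overlaps above uses the first of these outputs. The snippet below reproduces the quoted implementation (with added comments) and prints its output for a hypothetical n = 3 views; the choice of n is an assumption for the demo.

import torch
from einops import repeat

def generate_heterogeneous_index(n: int, device: torch.device = torch.device("cpu")):
    """Generate indices for all pairs except self-pairs."""
    arange = torch.arange(n, device=device)

    # Row i of index_self is [i, i, ..., i] with n - 1 entries.
    index_self = repeat(arange, "h -> h w", w=n - 1)

    # Row i of index_other is [0, ..., i - 1, i + 1, ..., n - 1].
    index_other = repeat(arange, "w -> h w", h=n).clone()
    index_other += torch.ones((n, n), device=device, dtype=torch.int64).triu()
    index_other = index_other[:, :-1]

    return index_self, index_other

if __name__ == "__main__":
    index_self, index_other = generate_heterogeneous_index(3)
    print(index_self)   # tensor([[0, 0], [1, 1], [2, 2]])
    print(index_other)  # tensor([[1, 2], [0, 2], [0, 1]])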
hutaiHang/Faster-Diffusion | if_demo.py | [
{
"identifier": "register_if1",
"path": "utils_if.py",
"snippet": "def register_if1(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n num_inference_steps: int = 100,\r\n timesteps: List[int] = None,\r\n guidance_scale: float = 7.0,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n height: Optional[int] = None,\r\n width: Optional[int] = None,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n clean_caption: bool = True,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n ):\r\n # 1. Check inputs. Raise error if not correct\r\n self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)\r\n\r\n # 2. Define call parameters\r\n height = height or self.unet.config.sample_size\r\n width = width or self.unet.config.sample_size\r\n\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n device = self._execution_device\r\n\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n # 3. Encode input prompt\r\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\r\n prompt,\r\n do_classifier_free_guidance,\r\n num_images_per_prompt=num_images_per_prompt,\r\n device=device,\r\n negative_prompt=negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n clean_caption=clean_caption,\r\n )\r\n\r\n if do_classifier_free_guidance:\r\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\r\n\r\n # 4. Prepare timesteps\r\n if timesteps is not None:\r\n self.scheduler.set_timesteps(timesteps=timesteps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n num_inference_steps = len(timesteps)\r\n else:\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 5. Prepare intermediate images\r\n intermediate_images = self.prepare_intermediate_images(\r\n batch_size * num_images_per_prompt,\r\n self.unet.config.in_channels,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n )\r\n\r\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # HACK: see comment in `enable_model_cpu_offload`\r\n if hasattr(self, \"text_encoder_offload_hook\") and self.text_encoder_offload_hook is not None:\r\n self.text_encoder_offload_hook.offload()\r\n\r\n # 7. 
Denoising loop\r\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\r\n\r\n all_timesteps = len(timesteps)\r\n curr_step = 0\r\n st = time.time()\r\n while curr_step<all_timesteps:\r\n refister_time(self.unet, curr_step)\r\n\r\n time_ls = []\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step > 85 or curr_step < 10 or (curr_step % 5 == 0)\r\n \r\n while (not cond) and (curr_step<all_timesteps):\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step > 85 or curr_step < 10 or (curr_step % 5 == 0)\r\n\r\n # print('curr_step', curr_step, len(time_ls))\r\n model_input = (\r\n torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images\r\n )\r\n\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1)\r\n noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)\r\n\r\n if self.scheduler.config.variance_type not in [\"learned\", \"learned_range\"]:\r\n noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1)\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n # intermediate_images = self.scheduler.step(\r\n # noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False\r\n # )[0]\r\n intermediate_images = multistep_pre(\r\n self, noise_pred, time_ls, intermediate_images)\r\n et = time.time()\r\n print('unet time: ', et-st, 'seconds')\r\n image = intermediate_images\r\n\r\n if output_type == \"pil\":\r\n # 8. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 9. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # 10. Convert to PIL\r\n image = self.numpy_to_pil(image)\r\n\r\n # 11. Apply watermark\r\n if self.watermarker is not None:\r\n image = self.watermarker.apply_watermark(image, self.unet.config.sample_size)\r\n elif output_type == \"pt\":\r\n nsfw_detected = None\r\n watermark_detected = None\r\n\r\n if hasattr(self, \"unet_offload_hook\") and self.unet_offload_hook is not None:\r\n self.unet_offload_hook.offload()\r\n else:\r\n # 8. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 9. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # Offload all models\r\n self.maybe_free_model_hooks()\r\n\r\n if not return_dict:\r\n return (image, nsfw_detected, watermark_detected)\r\n\r\n return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)\r\n return call\r\n pipe.call = new_call(pipe)\r"
},
{
"identifier": "register_if2",
"path": "utils_if.py",
"snippet": "def register_if2(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n height: int = None,\r\n width: int = None,\r\n image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor] = None,\r\n num_inference_steps: int = 50,\r\n timesteps: List[int] = None,\r\n guidance_scale: float = 4.0,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n noise_level: int = 250,\r\n clean_caption: bool = True,\r\n ):\r\n # 1. Check inputs. Raise error if not correct\r\n\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n self.check_inputs(\r\n prompt,\r\n image,\r\n batch_size,\r\n noise_level,\r\n callback_steps,\r\n negative_prompt,\r\n prompt_embeds,\r\n negative_prompt_embeds,\r\n )\r\n\r\n # 2. Define call parameters\r\n\r\n height = height or self.unet.config.sample_size\r\n width = width or self.unet.config.sample_size\r\n\r\n device = self._execution_device\r\n\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n # 3. Encode input prompt\r\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\r\n prompt,\r\n do_classifier_free_guidance,\r\n num_images_per_prompt=num_images_per_prompt,\r\n device=device,\r\n negative_prompt=negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n clean_caption=clean_caption,\r\n )\r\n\r\n if do_classifier_free_guidance:\r\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\r\n\r\n # 4. Prepare timesteps\r\n if timesteps is not None:\r\n self.scheduler.set_timesteps(timesteps=timesteps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n num_inference_steps = len(timesteps)\r\n else:\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 5. Prepare intermediate images\r\n num_channels = self.unet.config.in_channels // 2\r\n intermediate_images = self.prepare_intermediate_images(\r\n batch_size * num_images_per_prompt,\r\n num_channels,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n )\r\n\r\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # 7. 
Prepare upscaled image and noise level\r\n image = self.preprocess_image(image, num_images_per_prompt, device)\r\n upscaled = F.interpolate(image, (height, width), mode=\"bilinear\", align_corners=True)\r\n\r\n noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)\r\n noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)\r\n upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)\r\n\r\n if do_classifier_free_guidance:\r\n noise_level = torch.cat([noise_level] * 2)\r\n\r\n # HACK: see comment in `enable_model_cpu_offload`\r\n if hasattr(self, \"text_encoder_offload_hook\") and self.text_encoder_offload_hook is not None:\r\n self.text_encoder_offload_hook.offload()\r\n\r\n # 8. Denoising loop\r\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\r\n\r\n all_timesteps = len(timesteps)\r\n curr_step = 0\r\n st = time.time()\r\n while curr_step<all_timesteps:\r\n refister_time(self.unet, curr_step)\r\n\r\n time_ls = []\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step < 20 or curr_step > 40 or (curr_step % 2 == 0)\r\n \r\n while (not cond) and (curr_step<all_timesteps):\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n cond = curr_step < 20 or curr_step > 40 or (curr_step % 2 == 0)\r\n\r\n # print('curr_step', curr_step, len(time_ls))\r\n model_input = torch.cat([intermediate_images, upscaled], dim=1)\r\n\r\n model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input\r\n\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n class_labels=noise_level,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)\r\n noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)\r\n\r\n if self.scheduler.config.variance_type not in [\"learned\", \"learned_range\"]:\r\n noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)\r\n\r\n # # compute the previous noisy sample x_t -> x_t-1\r\n # intermediate_images = self.scheduler.step(\r\n # noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False\r\n # )[0]\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n # intermediate_images = self.scheduler.step(\r\n # noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False\r\n # )[0]\r\n intermediate_images = multistep_pre(\r\n self, noise_pred, time_ls, intermediate_images)\r\n \r\n et = time.time()\r\n print('unet time:', et - st, 'seconds')\r\n image = intermediate_images\r\n\r\n if output_type == \"pil\":\r\n # 9. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 10. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # 11. Convert to PIL\r\n image = self.numpy_to_pil(image)\r\n\r\n # 12. 
Apply watermark\r\n if self.watermarker is not None:\r\n self.watermarker.apply_watermark(image, self.unet.config.sample_size)\r\n elif output_type == \"pt\":\r\n nsfw_detected = None\r\n watermark_detected = None\r\n\r\n if hasattr(self, \"unet_offload_hook\") and self.unet_offload_hook is not None:\r\n self.unet_offload_hook.offload()\r\n else:\r\n # 9. Post-processing\r\n image = (image / 2 + 0.5).clamp(0, 1)\r\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\r\n\r\n # 10. Run safety checker\r\n image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n\r\n # Offload all models\r\n self.maybe_free_model_hooks()\r\n\r\n if not return_dict:\r\n return (image, nsfw_detected, watermark_detected)\r\n\r\n return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)\r\n\r\n return call\r\n pipe.call = new_call(pipe)\r"
},
{
"identifier": "register_if3",
"path": "utils_if.py",
"snippet": "def register_if3(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n image = None,\r\n num_inference_steps: int = 75,\r\n guidance_scale: float = 9.0,\r\n noise_level: int = 20,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n latents: Optional[torch.FloatTensor] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n clip_skip: int = None,\r\n ):\r\n # 1. Check inputs\r\n self.check_inputs(\r\n prompt,\r\n image,\r\n noise_level,\r\n callback_steps,\r\n negative_prompt,\r\n prompt_embeds,\r\n negative_prompt_embeds,\r\n )\r\n\r\n if image is None:\r\n raise ValueError(\"`image` input cannot be undefined.\")\r\n\r\n # 2. Define call parameters\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n device = self._execution_device\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n # 3. Encode input prompt\r\n text_encoder_lora_scale = (\r\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\r\n )\r\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\r\n prompt,\r\n device,\r\n num_images_per_prompt,\r\n do_classifier_free_guidance,\r\n negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n lora_scale=text_encoder_lora_scale,\r\n clip_skip=clip_skip,\r\n )\r\n # For classifier free guidance, we need to do two forward passes.\r\n # Here we concatenate the unconditional and text embeddings into a single batch\r\n # to avoid doing two forward passes\r\n if do_classifier_free_guidance:\r\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\r\n\r\n # 4. Preprocess image\r\n image = self.image_processor.preprocess(image)\r\n image = image.to(dtype=prompt_embeds.dtype, device=device)\r\n\r\n # 5. set timesteps\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 5. Add noise to image\r\n noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)\r\n noise = randn_tensor(image.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)\r\n image = self.low_res_scheduler.add_noise(image, noise, noise_level)\r\n\r\n batch_multiplier = 2 if do_classifier_free_guidance else 1\r\n image = torch.cat([image] * batch_multiplier * num_images_per_prompt)\r\n noise_level = torch.cat([noise_level] * image.shape[0])\r\n\r\n # 6. 
Prepare latent variables\r\n height, width = image.shape[2:]\r\n num_channels_latents = self.vae.config.latent_channels\r\n latents = self.prepare_latents(\r\n batch_size * num_images_per_prompt,\r\n num_channels_latents,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n latents,\r\n )\r\n\r\n # 7. Check that sizes of image and latents match\r\n num_channels_image = image.shape[1]\r\n if num_channels_latents + num_channels_image != self.unet.config.in_channels:\r\n raise ValueError(\r\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\r\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\r\n f\" `num_channels_image`: {num_channels_image} \"\r\n f\" = {num_channels_latents+num_channels_image}. Please verify the config of\"\r\n \" `pipeline.unet` or your `image` input.\"\r\n )\r\n\r\n # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # 9. Denoising loop\r\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\r\n\r\n\r\n all_timesteps = len(timesteps)\r\n curr_step = 0\r\n st = time.time()\r\n while curr_step<all_timesteps:\r\n refister_time(self.unet, curr_step)\r\n\r\n time_ls = []\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n\r\n ipow = int(np.sqrt(9 + 8*curr_step))\r\n cond = ipow * ipow == (9 + 8 * curr_step)\r\n # cond = curr_step in [0, 1, 2, 3, 5, 10, 15, 25, 35,45,55,65]\r\n while (not cond) and (curr_step<all_timesteps):\r\n time_ls.append(timesteps[curr_step])\r\n curr_step += 1\r\n\r\n ipow = int(np.sqrt(9 + 8*curr_step))\r\n cond = ipow * ipow == (9 + 8 * curr_step)\r\n # cond = curr_step in [0, 1, 2, 3, 5, 10, 15, 25, 35,45,55,65]\r\n\r\n # print(curr_step, len(time_ls))\r\n # expand the latents if we are doing classifier free guidance\r\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\r\n\r\n # concat latents, mask, masked_image_latents in the channel dimension\r\n latent_model_input = torch.cat([latent_model_input, image], dim=1)\r\n\r\n input = (latent_model_input,time_ls[0],\r\n prompt_embeds,noise_level, None, None,\r\n cross_attention_kwargs,None, None, None,None,None,\r\n False)\r\n\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n latent_model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n class_labels=noise_level,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n latents = multistep_pre(self, noise_pred, time_ls, latents)\r\n \r\n et = time.time()\r\n print('unet time:', et - st, 'seconds')\r\n\r\n if not output_type == \"latent\":\r\n # make sure the VAE is in float32 mode, as it overflows in float16\r\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\r\n\r\n if needs_upcasting:\r\n self.upcast_vae()\r\n\r\n # Ensure latents are always the same type as the VAE\r\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\r\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\r\n\r\n # cast back to fp16 if 
needed\r\n if needs_upcasting:\r\n self.vae.to(dtype=torch.float16)\r\n\r\n image, has_nsfw_concept, _ = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n else:\r\n image = latents\r\n has_nsfw_concept = None\r\n\r\n if has_nsfw_concept is None:\r\n do_denormalize = [True] * image.shape[0]\r\n else:\r\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\r\n\r\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\r\n\r\n # 11. Apply watermark\r\n if output_type == \"pil\" and self.watermarker is not None:\r\n image = self.watermarker.apply_watermark(image)\r\n\r\n # Offload all models\r\n self.maybe_free_model_hooks()\r\n\r\n if not return_dict:\r\n return (image, has_nsfw_concept)\r\n\r\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)\r\n return call\r\n pipe.call = new_call(pipe)"
},
{
"identifier": "register_faster_forward",
"path": "utils_if.py",
"snippet": "def register_faster_forward(model, mod):\r\n def faster_forward(self):\r\n def forward(\r\n sample: torch.FloatTensor,\r\n timestep: Union[torch.Tensor, float, int],\r\n encoder_hidden_states: torch.Tensor,\r\n class_labels: Optional[torch.Tensor] = None,\r\n timestep_cond: Optional[torch.Tensor] = None,\r\n attention_mask: Optional[torch.Tensor] = None,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\r\n mid_block_additional_residual: Optional[torch.Tensor] = None,\r\n return_dict: bool = True,\r\n ) -> Union[UNet2DConditionOutput, Tuple]:\r\n r\"\"\"\r\n Args:\r\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\r\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\r\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\r\n return_dict (`bool`, *optional*, defaults to `True`):\r\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\r\n cross_attention_kwargs (`dict`, *optional*):\r\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\r\n `self.processor` in\r\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\r\n\r\n Returns:\r\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\r\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When\r\n returning a tuple, the first element is the sample tensor.\r\n \"\"\"\r\n # By default samples have to be AT least a multiple of the overall upsampling factor.\r\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\r\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\r\n # on the fly if necessary.\r\n default_overall_up_factor = 2**self.num_upsamplers\r\n\r\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\r\n forward_upsample_size = False\r\n upsample_size = None\r\n\r\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\r\n logger.info(\"Forward upsample size to force interpolation output size.\")\r\n forward_upsample_size = True\r\n\r\n # prepare attention_mask\r\n if attention_mask is not None:\r\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\r\n attention_mask = attention_mask.unsqueeze(1)\r\n\r\n # 0. center input if necessary\r\n if self.config.center_input_sample:\r\n sample = 2 * sample - 1.0\r\n\r\n # 1. time\r\n if isinstance(timestep, list):\r\n timesteps = timestep[0]\r\n step = len(timestep)\r\n else:\r\n timesteps = timestep\r\n step = 1\r\n if not torch.is_tensor(timesteps) and (not isinstance(timesteps,list)):\r\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\r\n # This would be a good case for the `match` statement (Python 3.10+)\r\n is_mps = sample.device.type == \"mps\"\r\n if isinstance(timestep, float):\r\n dtype = torch.float32 if is_mps else torch.float64\r\n else:\r\n dtype = torch.int32 if is_mps else torch.int64\r\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\r\n elif (not isinstance(timesteps,list)) and len(timesteps.shape) == 0:\r\n timesteps = timesteps[None].to(sample.device)\r\n \r\n if (not isinstance(timesteps,list)) and len(timesteps.shape) == 1:\r\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\r\n timesteps = timesteps.expand(sample.shape[0])\r\n elif isinstance(timesteps, list):\r\n #timesteps list, such as [981,961,941]\r\n timesteps = warpped_timestep(timesteps, sample.shape[0]).to(sample.device)\r\n t_emb = self.time_proj(timesteps)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # but time_embedding might actually be running in fp16. so we need to cast here.\r\n # there might be better ways to encapsulate this.\r\n t_emb = t_emb.to(dtype=self.dtype)\r\n\r\n emb = self.time_embedding(t_emb, timestep_cond)\r\n\r\n if self.class_embedding is not None:\r\n if class_labels is None:\r\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\r\n\r\n if self.config.class_embed_type == \"timestep\":\r\n class_labels = self.time_proj(class_labels)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # there might be better ways to encapsulate this.\r\n class_labels = class_labels.to(dtype=sample.dtype)\r\n\r\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\r\n\r\n if self.config.class_embeddings_concat:\r\n emb = torch.cat([emb, class_emb], dim=-1)\r\n else:\r\n emb = emb + class_emb\r\n\r\n if self.config.addition_embed_type == \"text\":\r\n aug_emb = self.add_embedding(encoder_hidden_states)\r\n emb = emb + aug_emb\r\n\r\n if self.time_embed_act is not None:\r\n emb = self.time_embed_act(emb)\r\n\r\n if self.encoder_hid_proj is not None:\r\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\r\n\r\n order = self.order\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35]\r\n ipow = int(np.sqrt(9 + 8*order))\r\n if isinstance(mod, int):\r\n cond = order % mod == 0\r\n elif mod == \"pro\":\r\n cond = ipow * ipow == (9 + 8 * order)\r\n elif mod == \"50ls\":\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35,40] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls2\":\r\n cond = order in [0, 10, 11, 12, 15, 20, 25, 30,35,45] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls3\":\r\n cond = order in [0, 20, 25, 30,35,45,46,47,48,49] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls4\":\r\n cond = order in [0, 9, 13, 14, 15, 28, 29, 32, 36] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"100ls\":\r\n cond = order > 85 or order < 10 or order % 5 == 0\r\n elif mod == \"75ls\":\r\n cond = order > 65 or order < 10 or order % 5 == 0\r\n elif mod == \"75ls2\":\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35,45,55,65]\r\n elif mod == \"s2\":\r\n cond = True\r\n #===============\r\n order = self.order #timestep, start by 0\r\n #===============\r\n # if ipow*ipow == (9+8*order): #progressive skip, i.e. 
[0,2,5,...]\r\n if cond:\r\n # if order%2 == 0: # merge 2 step\r\n # print(order)\r\n # 2. pre-process\r\n sample = self.conv_in(sample)\r\n\r\n # 3. down\r\n down_block_res_samples = (sample,)\r\n for downsample_block in self.down_blocks:\r\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\r\n sample, res_samples = downsample_block(\r\n hidden_states=sample,\r\n temb=emb,\r\n encoder_hidden_states=encoder_hidden_states,\r\n attention_mask=attention_mask,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n )\r\n else:\r\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\r\n\r\n down_block_res_samples += res_samples\r\n\r\n if down_block_additional_residuals is not None:\r\n new_down_block_res_samples = ()\r\n\r\n for down_block_res_sample, down_block_additional_residual in zip(\r\n down_block_res_samples, down_block_additional_residuals\r\n ):\r\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\r\n new_down_block_res_samples += (down_block_res_sample,)\r\n\r\n down_block_res_samples = new_down_block_res_samples\r\n\r\n # 4. mid\r\n if self.mid_block is not None:\r\n sample = self.mid_block(\r\n sample,\r\n emb,\r\n encoder_hidden_states=encoder_hidden_states,\r\n attention_mask=attention_mask,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n )\r\n\r\n if mid_block_additional_residual is not None:\r\n sample = sample + mid_block_additional_residual\r\n\r\n #----------------------save feature-------------------------\r\n setattr(self, 'skip_feature', deepcopy(down_block_res_samples))\r\n setattr(self, 'toup_feature', sample.detach().clone())\r\n #-----------------------save feature------------------------\r\n\r\n\r\n\r\n #-------------------expand feature for parallel---------------\r\n # print(step)\r\n\r\n # print('pre emb shape', emb.shape)\r\n if isinstance(timestep, list):\r\n #timesteps list, such as [981,961,941]\r\n timesteps = warpped_timestep(timestep, sample.shape[0]).to(sample.device)\r\n t_emb = self.time_proj(timesteps)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # but time_embedding might actually be running in fp16. so we need to cast here.\r\n # there might be better ways to encapsulate this.\r\n t_emb = t_emb.to(dtype=self.dtype)\r\n\r\n emb = self.time_embedding(t_emb, timestep_cond)\r\n # print('post emb shape', emb.shape)\r\n\r\n # print('pre sample shape', sample.shape)\r\n # print(step, sample.shape)\r\n down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)\r\n sample = warpped_feature(sample, step)\r\n # print('post sample shape', sample.shape)\r\n\r\n # print('pre text shape', encoder_hidden_states.shape)\r\n encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)\r\n # print('post text shape', encoder_hidden_states.shape)\r\n # print('==========================')\r\n #-------------------expand feature for parallel---------------\r\n \r\n else:\r\n down_block_res_samples = self.skip_feature\r\n sample = self.toup_feature\r\n\r\n #-------------------expand feature for parallel---------------\r\n down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)\r\n sample = warpped_feature(sample, step)\r\n encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)\r\n #-------------------expand feature for parallel---------------\r\n\r\n # 5. 
up\r\n for i, upsample_block in enumerate(self.up_blocks):\r\n is_final_block = i == len(self.up_blocks) - 1\r\n\r\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\r\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\r\n\r\n # if we have not reached the final block and need to forward the\r\n # upsample size, we do it here\r\n if not is_final_block and forward_upsample_size:\r\n upsample_size = down_block_res_samples[-1].shape[2:]\r\n\r\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\r\n sample = upsample_block(\r\n hidden_states=sample,\r\n temb=emb,\r\n res_hidden_states_tuple=res_samples,\r\n encoder_hidden_states=encoder_hidden_states,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n upsample_size=upsample_size,\r\n attention_mask=attention_mask,\r\n )\r\n else:\r\n sample = upsample_block(\r\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size\r\n )\r\n\r\n # 6. post-process\r\n if self.conv_norm_out:\r\n sample = self.conv_norm_out(sample)\r\n sample = self.conv_act(sample)\r\n sample = self.conv_out(sample)\r\n\r\n if not return_dict:\r\n return (sample,)\r\n\r\n return UNet2DConditionOutput(sample=sample)\r\n return forward\r\n if model.__class__.__name__ == 'UNet2DConditionModel':\r\n model.forward = faster_forward(model)\r"
},
{
"identifier": "seed_everything",
"path": "utils_if.py",
"snippet": "def seed_everything(seed):\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n random.seed(seed)\r\n np.random.seed(seed)\r"
}
] | from diffusers import DiffusionPipeline , IFPipeline, IFSuperResolutionPipeline, StableDiffusionUpscalePipeline
from diffusers.utils import pt_to_pil
from diffusers import DPMSolverMultistepScheduler
from utils_if import register_if1, register_if2,register_if3, register_faster_forward, seed_everything
import torch
| 9,934 |
seed_everything(2023)
prompt = "a lone sailboat drifting on calm waters"
stage_1 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-I-XL-v1.0",
variant="fp16",
torch_dtype=torch.float16,
).to('cuda')
stage_2 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0",
text_encoder=None,
variant="fp16",
torch_dtype=torch.float16,
).to('cuda')
# stage 3
safety_modules = {
"feature_extractor": stage_1.feature_extractor,
"safety_checker": None,
"watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler",
**safety_modules,
torch_dtype=torch.float16
).to('cuda')
|
seed_everything(2023)
prompt = "a lone sailboat drifting on calm waters"
stage_1 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-I-XL-v1.0",
variant="fp16",
torch_dtype=torch.float16,
).to('cuda')
stage_2 = DiffusionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0",
text_encoder=None,
variant="fp16",
torch_dtype=torch.float16,
).to('cuda')
# stage 3
safety_modules = {
"feature_extractor": stage_1.feature_extractor,
"safety_checker": None,
"watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler",
**safety_modules,
torch_dtype=torch.float16
).to('cuda')
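# The three stages are chained: stage_1 generates the 64x64 base image, stage_2 super-resolves
# it to 256x256, and stage_3 (the Stable Diffusion x4 upscaler) produces the final output.
# The register_if1/register_if2/register_if3 helpers from utils_if.py replace each pipeline's
# sampling loop with one that batches several consecutive timesteps into a single UNet call,
# and register_faster_forward patches the UNet forward to reuse cached down/mid-block features
# on non-key steps. The '100ls' schedule applied on the next line recomputes features only when
# step > 85, step < 10, or step % 5 == 0, matching the key-step condition inside register_if1.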
| register_faster_forward(stage_1.unet, mod = '100ls')
| 3 | 2023-12-15 05:03:37+00:00 | 12k |
FoundationVision/GLEE | app/GLEE/glee/models/transformer_decoder/maskdino_decoder.py | [
{
"identifier": "TransformerDecoder",
"path": "app/GLEE/glee/models/transformer_decoder/dino_decoder.py",
"snippet": "class TransformerDecoder(nn.Module):\r\n\r\n def __init__(self, decoder_layer, num_layers, norm=None,\r\n return_intermediate=False,\r\n d_model=256, query_dim=4,\r\n modulate_hw_attn=True,\r\n num_feature_levels=1,\r\n deformable_decoder=True,\r\n decoder_query_perturber=None,\r\n dec_layer_number=None, # number of queries each layer in decoder\r\n rm_dec_query_scale=True,\r\n dec_layer_share=False,\r\n dec_layer_dropout_prob=None,\r\n cross_track_layer = False,\r\n n_levels = None, \r\n n_heads = None, \r\n n_points = None,\r\n ):\r\n super().__init__()\r\n if num_layers > 0:\r\n self.layers = _get_clones(decoder_layer, num_layers, layer_share=dec_layer_share)\r\n else:\r\n self.layers = []\r\n self.num_layers = num_layers\r\n self.norm = norm\r\n self.return_intermediate = return_intermediate\r\n assert return_intermediate, \"support return_intermediate only\"\r\n self.query_dim = query_dim\r\n assert query_dim in [2, 4], \"query_dim should be 2/4 but {}\".format(query_dim)\r\n self.num_feature_levels = num_feature_levels\r\n\r\n self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)\r\n if not deformable_decoder:\r\n self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)\r\n else:\r\n self.query_pos_sine_scale = None\r\n\r\n if rm_dec_query_scale:\r\n self.query_scale = None\r\n else:\r\n raise NotImplementedError\r\n self.query_scale = MLP(d_model, d_model, d_model, 2)\r\n self.bbox_embed = None\r\n self.class_embed = None\r\n\r\n self.d_model = d_model\r\n self.modulate_hw_attn = modulate_hw_attn\r\n self.deformable_decoder = deformable_decoder\r\n\r\n if not deformable_decoder and modulate_hw_attn:\r\n self.ref_anchor_head = MLP(d_model, d_model, 2, 2)\r\n else:\r\n self.ref_anchor_head = None\r\n\r\n self.decoder_query_perturber = decoder_query_perturber\r\n self.box_pred_damping = None\r\n\r\n self.dec_layer_number = dec_layer_number\r\n if dec_layer_number is not None:\r\n assert isinstance(dec_layer_number, list)\r\n assert len(dec_layer_number) == num_layers\r\n # assert dec_layer_number[0] ==\r\n\r\n self.dec_layer_dropout_prob = dec_layer_dropout_prob\r\n if dec_layer_dropout_prob is not None:\r\n assert isinstance(dec_layer_dropout_prob, list)\r\n assert len(dec_layer_dropout_prob) == num_layers\r\n for i in dec_layer_dropout_prob:\r\n assert 0.0 <= i <= 1.0\r\n if cross_track_layer: # add a cross-attention-layer before track ffn head\r\n self.cross_track_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\r\n self.cross_track = True\r\n else:\r\n self.cross_track = False\r\n\r\n self._reset_parameters()\r\n\r\n def _reset_parameters(self):\r\n for p in self.parameters():\r\n if p.dim() > 1:\r\n nn.init.xavier_uniform_(p)\r\n for m in self.modules():\r\n if isinstance(m, MSDeformAttn):\r\n m._reset_parameters()\r\n @staticmethod\r\n def with_pos_embed(tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n\r\n def forward(self, tgt, memory,\r\n tgt_mask: Optional[Tensor] = None,\r\n memory_mask: Optional[Tensor] = None,\r\n tgt_key_padding_mask: Optional[Tensor] = None,\r\n memory_key_padding_mask: Optional[Tensor] = None,\r\n pos: Optional[Tensor] = None,\r\n refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2\r\n # for memory\r\n level_start_index: Optional[Tensor] = None, # num_levels\r\n spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\r\n valid_ratios: Optional[Tensor] = None,\r\n task = None,\r\n extra = None,\r\n\r\n ):\r\n \"\"\"\r\n Input:\r\n - tgt: nq, bs, d_model\r\n - memory: 
hw, bs, d_model\r\n - pos: hw, bs, d_model\r\n - refpoints_unsigmoid: nq, bs, 2/4\r\n - valid_ratios/spatial_shapes: bs, nlevel, 2\r\n \"\"\"\r\n output = tgt\r\n device = tgt.device\r\n\r\n intermediate = []\r\n reference_points = refpoints_unsigmoid.sigmoid().to(device)\r\n ref_points = [reference_points]\r\n\r\n for layer_id, layer in enumerate(self.layers):\r\n # preprocess ref points\r\n if self.training and self.decoder_query_perturber is not None and layer_id != 0:\r\n reference_points = self.decoder_query_perturber(reference_points)\r\n\r\n reference_points_input = reference_points[:, :, None] \\\r\n * torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4\r\n query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2\r\n\r\n raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256\r\n pos_scale = self.query_scale(output) if self.query_scale is not None else 1\r\n query_pos = pos_scale * raw_query_pos\r\n\r\n output = layer(\r\n tgt=output,\r\n tgt_query_pos=query_pos,\r\n tgt_query_sine_embed=query_sine_embed,\r\n tgt_key_padding_mask=tgt_key_padding_mask,\r\n tgt_reference_points=reference_points_input,\r\n\r\n memory=memory,\r\n memory_key_padding_mask=memory_key_padding_mask,\r\n memory_level_start_index=level_start_index,\r\n memory_spatial_shapes=spatial_shapes,\r\n memory_pos=pos,\r\n\r\n self_attn_mask=tgt_mask,\r\n cross_attn_mask=memory_mask,\r\n task = task,\r\n extra = extra,\r\n layer_id = layer_id,\r\n )\r\n\r\n # iter update\r\n if self.bbox_embed is not None:\r\n reference_before_sigmoid = inverse_sigmoid(reference_points)\r\n delta_unsig = self.bbox_embed[layer_id](output).to(device)\r\n outputs_unsig = delta_unsig + reference_before_sigmoid\r\n new_reference_points = outputs_unsig.sigmoid()\r\n\r\n reference_points = new_reference_points.detach()\r\n # if layer_id != self.num_layers - 1:\r\n ref_points.append(new_reference_points)\r\n\r\n intermediate.append(self.norm(output))\r\n\r\n\r\n if self.cross_track:\r\n tgt_track = self.cross_track_attn(self.with_pos_embed(output, query_pos).transpose(0, 1),\r\n reference_points_input.transpose(0, 1).contiguous(),\r\n memory.transpose(0, 1), spatial_shapes, level_start_index,\r\n memory_key_padding_mask).transpose(0, 1)\r\n tgt_track = tgt_track + output\r\n tgt_track = tgt_track.transpose(0, 1)\r\n else:\r\n tgt_track = None\r\n\r\n return [\r\n [itm_out.transpose(0, 1) for itm_out in intermediate],\r\n [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points], tgt_track\r\n ]\r"
},
{
"identifier": "DeformableTransformerDecoderLayer",
"path": "app/GLEE/glee/models/transformer_decoder/dino_decoder.py",
"snippet": "class DeformableTransformerDecoderLayer(nn.Module):\r\n\r\n def __init__(self, d_model=256, d_ffn=1024,\r\n dropout=0.1, activation=\"relu\",\r\n n_levels=4, n_heads=8, n_points=4,\r\n use_deformable_box_attn=False,\r\n key_aware_type=None,\r\n ):\r\n super().__init__()\r\n self.n_heads = n_heads\r\n # cross attention\r\n if use_deformable_box_attn:\r\n raise NotImplementedError\r\n else:\r\n self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.norm1 = nn.LayerNorm(d_model)\r\n\r\n # self attention\r\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\r\n self.dropout2 = nn.Dropout(dropout)\r\n self.norm2 = nn.LayerNorm(d_model)\r\n\r\n # ffn\r\n self.linear1 = nn.Linear(d_model, d_ffn)\r\n self.activation = _get_activation_fn(activation)\r\n self.dropout3 = nn.Dropout(dropout)\r\n self.linear2 = nn.Linear(d_ffn, d_model)\r\n self.dropout4 = nn.Dropout(dropout)\r\n self.norm3 = nn.LayerNorm(d_model)\r\n\r\n self.key_aware_type = key_aware_type\r\n self.key_aware_proj = None\r\n\r\n def rm_self_attn_modules(self):\r\n self.self_attn = None\r\n self.dropout2 = None\r\n self.norm2 = None\r\n\r\n @staticmethod\r\n def with_pos_embed(tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n def forward_ffn(self, tgt):\r\n tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\r\n tgt = tgt + self.dropout4(tgt2)\r\n tgt = self.norm3(tgt)\r\n return tgt\r\n\r\n @autocast(enabled=False)\r\n def forward(self,\r\n # for tgt\r\n tgt: Optional[Tensor], # nq, bs, d_model\r\n tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\r\n tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)\r\n tgt_key_padding_mask: Optional[Tensor] = None,\r\n tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\r\n\r\n # for memory\r\n memory: Optional[Tensor] = None, # hw, bs, d_model\r\n memory_key_padding_mask: Optional[Tensor] = None,\r\n memory_level_start_index: Optional[Tensor] = None, # num_levels\r\n memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\r\n memory_pos: Optional[Tensor] = None, # pos for memory\r\n\r\n # sa\r\n self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\r\n cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\r\n task = None,\r\n extra = None,\r\n layer_id = None,\r\n ):\r\n \"\"\"\r\n Input:\r\n - tgt/tgt_query_pos: nq, bs, d_model\r\n -\r\n \"\"\"\r\n # self attention\r\n\r\n\r\n if task in ['grounding', 'rvos'] or 'visual_prompt_tokens' in extra:\r\n if self_attn_mask is not None: # training with denoising query \r\n\r\n if 'visual_prompt_tokens' in extra: # has visual prompt \r\n level_index = layer_id % 3 # src level : self.num_feature_levels\r\n prompt_tokens = extra['visual_prompt_tokens'][level_index]\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['visual_prompt_nonzero_mask'][level_index]\r\n else: #grounding\r\n prompt_tokens = extra['grounding_tokens']\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['grounding_nonzero_mask']\r\n ori_size = tgt.shape[0]\r\n new_mask_size = tgt.shape[0]+prompt_tokens.shape[0]\r\n new_self_attn_mask = torch.zeros((tgt.shape[1], new_mask_size, new_mask_size), dtype=torch.bool, device=tgt.device)\r\n \r\n new_self_attn_mask[:,:ori_size,:ori_size] = self_attn_mask.unsqueeze(0).repeat(tgt.shape[1],1,1) #denoising matching keepmask\r\n\r\n # prompt to prompt mask set to True if 
they are not valid\r\n # new_self_attn_mask[:,ori_size:,ori_size:][prompt_mask] = True\r\n # new_self_attn_mask[:,ori_size:,ori_size:].transpose(1,2)[prompt_mask] = True\r\n\r\n # prompt2obj and obj2prompt mask set to True \r\n # new_self_attn_mask[:,ori_size-300:ori_size,ori_size:][] = True \r\n new_self_attn_mask[:,:ori_size,ori_size:].transpose(1,2)[prompt_mask] = True \r\n \r\n new_self_attn_mask[:,ori_size:,:ori_size][prompt_mask] = True \r\n # new_self_attn_mask[:,ori_size:,ori_size-300:ori_size].transpose(1,2)[] = True \r\n\r\n new_self_attn_mask = new_self_attn_mask.repeat_interleave(self.n_heads, dim=0)\r\n else: # with out denoising query\r\n if 'visual_prompt_tokens' in extra: # has visual prompt \r\n level_index = layer_id % 3 # src level : self.num_feature_levels\r\n prompt_tokens = extra['visual_prompt_tokens'][level_index]\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['visual_prompt_nonzero_mask'][level_index]\r\n else: #grounding\r\n prompt_tokens = extra['grounding_tokens']\r\n promot_pos = prompt_tokens.detach().clone()\r\n prompt_mask = extra['grounding_nonzero_mask']\r\n ori_size = tgt.shape[0]\r\n new_mask_size = tgt.shape[0]+prompt_tokens.shape[0]\r\n new_self_attn_mask = torch.zeros((tgt.shape[1], new_mask_size, new_mask_size), dtype=torch.bool, device=tgt.device)\r\n new_self_attn_mask[:,:ori_size,ori_size:].transpose(1,2)[prompt_mask] = True \r\n new_self_attn_mask[:,ori_size:,:ori_size][prompt_mask] = True \r\n new_self_attn_mask = new_self_attn_mask.repeat_interleave(self.n_heads, dim=0)\r\n\r\n\r\n if self.self_attn is not None:\r\n tgt = torch.cat([tgt,prompt_tokens],dim=0)\r\n tgt_query_pos = torch.cat([tgt_query_pos,promot_pos],dim=0)\r\n q = k = self.with_pos_embed(tgt, tgt_query_pos)\r\n tgt2 = self.self_attn(q, k, tgt, attn_mask=new_self_attn_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n tgt = tgt[:ori_size]\r\n tgt_query_pos = tgt_query_pos[:ori_size]\r\n else:\r\n if self.self_attn is not None:\r\n q = k = self.with_pos_embed(tgt, tgt_query_pos)\r\n tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n\r\n # cross attention\r\n if self.key_aware_type is not None:\r\n if self.key_aware_type == 'mean':\r\n tgt = tgt + memory.mean(0, keepdim=True)\r\n elif self.key_aware_type == 'proj_mean':\r\n tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)\r\n else:\r\n raise NotImplementedError(\"Unknown key_aware_type: {}\".format(self.key_aware_type))\r\n tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\r\n tgt_reference_points.transpose(0, 1).contiguous(),\r\n memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index,\r\n memory_key_padding_mask).transpose(0, 1)\r\n tgt = tgt + self.dropout1(tgt2)\r\n tgt = self.norm1(tgt)\r\n\r\n # ffn\r\n tgt = self.forward_ffn(tgt)\r\n\r\n return tgt\r"
},
{
"identifier": "MLP",
"path": "app/GLEE/glee/utils/utils.py",
"snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x"
},
{
"identifier": "gen_encoder_output_proposals",
"path": "app/GLEE/glee/utils/utils.py",
"snippet": "def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor):\n \"\"\"\n Input:\n - memory: bs, \\sum{hw}, d_model\n - memory_padding_mask: bs, \\sum{hw}\n - spatial_shapes: nlevel, 2\n Output:\n - output_memory: bs, \\sum{hw}, d_model\n - output_proposals: bs, \\sum{hw}, 4\n \"\"\"\n N_, S_, C_ = memory.shape\n base_scale = 4.0\n proposals = []\n _cur = 0\n for lvl, (H_, W_) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n proposals.append(proposal)\n _cur += (H_ * W_)\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n return output_memory, output_proposals"
},
{
"identifier": "inverse_sigmoid",
"path": "app/GLEE/glee/utils/utils.py",
"snippet": "def inverse_sigmoid(x, eps=1e-5):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)"
},
{
"identifier": "box_ops",
"path": "app/GLEE/glee/utils/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_xywh_to_xyxy(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):"
}
] | import logging
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.utils.registry import Registry
from detectron2.structures import BitMasks
from timm.models.layers import trunc_normal_
from .dino_decoder import TransformerDecoder, DeformableTransformerDecoderLayer
from ...utils.utils import MLP, gen_encoder_output_proposals, inverse_sigmoid
from ...utils import box_ops | 8,471 | 'map_known_indice': torch.as_tensor(map_known_indice).long(),
'known_lbs_bboxes': (known_labels, known_bboxs),
'know_idx': know_idx,
'pad_size': pad_size,
'scalar': scalar,
}
else:
if not refpoint_emb is None:
input_query_label = tgt.repeat(batch_size, 1, 1)
input_query_bbox = refpoint_emb.repeat(batch_size, 1, 1)
else:
input_query_label=None
input_query_bbox=None
attn_mask = None
mask_dict=None
# 100*batch*256
if not input_query_bbox is None:
input_query_label = input_query_label
input_query_bbox = input_query_bbox
return input_query_label,input_query_bbox,attn_mask,mask_dict
def dn_post_process(self,outputs_class,outputs_score,outputs_coord,mask_dict,outputs_mask):
"""
post-process the denoising (DN) part after the transformer output;
split it off and store the DN predictions in mask_dict
"""
assert mask_dict['pad_size'] > 0
output_known_class = outputs_class[:, :, :mask_dict['pad_size'], :]
outputs_class = outputs_class[:, :, mask_dict['pad_size']:, :]
output_known_score = outputs_score[:, :, :mask_dict['pad_size'], :]
outputs_score = outputs_score[:, :, mask_dict['pad_size']:, :]
output_known_coord = outputs_coord[:, :, :mask_dict['pad_size'], :]
outputs_coord = outputs_coord[:, :, mask_dict['pad_size']:, :]
if outputs_mask is not None:
output_known_mask = outputs_mask[:, :, :mask_dict['pad_size'], :]
outputs_mask = outputs_mask[:, :, mask_dict['pad_size']:, :]
out = {'pred_logits': output_known_class[-1], 'pred_scores':output_known_score[-1],'pred_boxes': output_known_coord[-1],'pred_masks': output_known_mask[-1]}
out['aux_outputs'] = self._set_aux_loss(output_known_class, output_known_score, output_known_mask, output_known_coord)
mask_dict['output_known_lbs_bboxes']=out
return outputs_class, outputs_score, outputs_coord, outputs_mask
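# Note: the first mask_dict['pad_size'] queries in each prediction tensor are the denoising (DN)
# queries and the remainder are the matching queries; dn_post_process splits them so the DN part
# can be supervised separately via mask_dict['output_known_lbs_bboxes'].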
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
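# valid_ratio measures the fraction of each padded feature map that contains real image content
# (valid_W / W, valid_H / H); it is later stacked per level and used to scale the normalized
# reference points inside the deformable-attention decoder.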
def pred_box(self, reference, hs, ref0=None):
"""
:param reference: reference box coordinates from each decoder layer
:param hs: content (query) embeddings from each decoder layer
:param ref0: initial box predictions before the first decoder layer, if any
"""
device = reference[0].device
if ref0 is None:
outputs_coord_list = []
else:
outputs_coord_list = [ref0.to(device)]
for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)):
layer_delta_unsig = layer_bbox_embed(layer_hs).to(device)
layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig).to(device)
layer_outputs_unsig = layer_outputs_unsig.sigmoid()
outputs_coord_list.append(layer_outputs_unsig)
outputs_coord_list = torch.stack(outputs_coord_list)
return outputs_coord_list
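# Box refinement per decoder layer (all quantities normalized to [0, 1]):
#   box_l = sigmoid( bbox_embed_l(hs_l) + inverse_sigmoid(ref_{l-1}) )
# i.e. each layer predicts an unbounded delta that is added to the previous reference box in
# logit space before being squashed back through a sigmoid.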
def forward(self, x, mask_features, extra, task, masks, targets=None):
"""
:param x: input, a list of multi-scale features
:param mask_features: per-pixel embeddings with resolution 1/4 of the original image,
obtained by fusing backbone and encoder features. This is used to produce binary masks.
:param masks: padding masks in the original image
:param targets: used for denoising training
"""
if 'spatial_query_pos_mask' in extra:
visual_P = True
else:
visual_P = False
assert len(x) == self.num_feature_levels
device = x[0].device
size_list = []
# disable mask, it does not affect performance
enable_mask = 0
if masks is not None:
for src in x:
if src.size(2) % 32 or src.size(3) % 32:
enable_mask = 1
if enable_mask == 0:
masks = [torch.zeros((src.size(0), src.size(2), src.size(3)), device=src.device, dtype=torch.bool) for src in x]
src_flatten = []
mask_flatten = []
spatial_shapes = []
for i in range(self.num_feature_levels):
idx=self.num_feature_levels-1-i
bs, c , h, w=x[idx].shape
size_list.append(x[i].shape[-2:])
spatial_shapes.append(x[idx].shape[-2:])
src_flatten.append(self.input_proj[idx](x[idx]).flatten(2).transpose(1, 2))
mask_flatten.append(masks[i].flatten(1))
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
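# At this point the multi-scale features are flattened into a single token sequence:
#   src_flatten:  (bs, sum_l H_l*W_l, hidden_dim)  -- levels appended in reverse order of x
#   mask_flatten: (bs, sum_l H_l*W_l)              -- per-token padding mask
# level_start_index records where each level starts in that sequence, and valid_ratios
# has shape (bs, num_levels, 2) for scaling reference points in deformable attention.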
predictions_federate = []
predictions_score = []
predictions_class = []
predictions_mask = []
if self.two_stage:
| # ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from Mask2Former https://github.com/facebookresearch/Mask2Former by Feng Li and Hao Zhang.
TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
TRANSFORMER_DECODER_REGISTRY.__doc__ = """
Registry for transformer module in MaskDINO.
"""
def build_transformer_decoder(cfg, in_channels, lang_encoder, mask_classification=True):
"""
Build a transformer decoder from `cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME`.
"""
name = cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME
return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, lang_encoder, mask_classification)
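# The decoder class is resolved by name through the Detectron2 Registry above, so alternative
# decoders can be plugged in via the config. Illustrative use (hypothetical variable names,
# assuming cfg.MODEL.MaskDINO.TRANSFORMER_DECODER_NAME == "MaskDINODecoder"):
#   decoder = build_transformer_decoder(cfg, in_channels=256, lang_encoder=lang_encoder)
#   # equivalent to TRANSFORMER_DECODER_REGISTRY.get("MaskDINODecoder")(cfg, 256, lang_encoder, True)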
@TRANSFORMER_DECODER_REGISTRY.register()
class MaskDINODecoder(nn.Module):
@configurable
def __init__(
self,
in_channels,
lang_encoder,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dim_feedforward: int,
dec_layers: int,
mask_dim: int,
dim_projection: int,
enforce_input_project: bool,
two_stage: bool,
dn: str,
noise_scale:float,
dn_num:int,
initialize_box_type:bool,
initial_pred:bool,
learn_tgt: bool,
total_num_feature_levels: int = 4,
dropout: float = 0.0,
activation: str = 'relu',
nhead: int = 8,
dec_n_points: int = 4,
return_intermediate_dec: bool = True,
query_dim: int = 4,
dec_layer_share: bool = False,
semantic_ce_loss: bool = False,
cross_track_layer: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dim_feedforward: feature dimension in feedforward network
enc_layers: number of Transformer encoder layers
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
mask_dim: mask feature dimension
enforce_input_project: add an input projection 1x1 conv even if input
channels and hidden dim are identical
d_model: transformer dimension
dropout: dropout rate
activation: activation function
nhead: num heads in multi-head attention
dec_n_points: number of sampling points in decoder
return_intermediate_dec: return the intermediate results of decoder
query_dim: 4 -> (x, y, w, h)
dec_layer_share: whether to share each decoder layer
semantic_ce_loss: use ce loss for semantic segmentation
"""
super().__init__()
assert mask_classification, "Only support mask classification model"
self.mask_classification = mask_classification
self.num_feature_levels = total_num_feature_levels
self.initial_pred = initial_pred
self.lang_encoder = lang_encoder
# define Transformer decoder here
self.dn=dn
self.learn_tgt = learn_tgt
self.noise_scale=noise_scale
self.dn_num=dn_num
self.num_heads = nheads
self.num_layers = dec_layers
self.two_stage=two_stage
self.initialize_box_type = initialize_box_type
self.total_num_feature_levels = total_num_feature_levels
self.num_queries = num_queries
self.semantic_ce_loss = semantic_ce_loss
# learnable query features
if not two_stage or self.learn_tgt:
self.query_feat = nn.Embedding(num_queries, hidden_dim)
if not two_stage and initialize_box_type == 'no':
self.query_embed = nn.Embedding(num_queries, 4)
if two_stage:
self.enc_output = nn.Linear(hidden_dim, hidden_dim)
self.enc_output_norm = nn.LayerNorm(hidden_dim)
self.input_proj = nn.ModuleList()
for _ in range(self.num_feature_levels):
if in_channels != hidden_dim or enforce_input_project:
self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
weight_init.c2_xavier_fill(self.input_proj[-1])
else:
self.input_proj.append(nn.Sequential())
self.num_classes = {
'obj365':100,
'obj365_clip':100,
'lvis':100,
'openimage':100,
'lvis_clip':100,
'openimage_clip':100,
'grit':100,
'vg':200,
'coco':80,
'coco_clip':80,
'grounding':1,
'rvos':1,
'sa1b':1,
'sa1b_clip':1,
'bdd_det':10,
'bdd_inst':8,
'ytvis19':40,
'image_yt19':40,
'image_yt21':40,
'bdd_track_seg':8,
'bdd_track_box':8,
'ovis':25,
'image_o':25,
'ytvis21':40,
'uvo_video': 81,
'ytbvos':1,
}
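# Per-task number of categories. prepare_for_dn uses self.num_classes[task] as the sampling
# range when randomly flipping labels for denoising queries, so each dataset/task draws noise
# labels from its own label space (the large-vocabulary sets appear to be capped at 100/200
# classes here).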
# output FFNs
assert self.mask_classification, "why not class embedding?"
self.confidence_score = MLP(hidden_dim, hidden_dim, 1, 2)
self.category_embed = nn.Parameter(torch.rand(hidden_dim, dim_projection))
# trunc_normal_(self.category_embed, std=.02)
# self.track_embed = MLP(hidden_dim, hidden_dim, hidden_dim, 3)
self.coco_label_enc = nn.Embedding(80,hidden_dim)
self.obj365_label_enc = nn.Embedding(100, hidden_dim)
self.vg_label_enc = nn.Embedding(200, hidden_dim)
self.grounding_label_enc = nn.Embedding(1,hidden_dim)
self.ytvis19_label_enc = nn.Embedding(40,hidden_dim)
self.ytvis21_label_enc = nn.Embedding(40,hidden_dim)
self.ovis_label_enc = nn.Embedding(25,hidden_dim)
self.uvo_label_enc = nn.Embedding(81,hidden_dim)
self.bdd_det = nn.Embedding(10,hidden_dim)
self.bdd_inst = nn.Embedding(8,hidden_dim)
self.label_enc = {
'coco': self.coco_label_enc,
'coco_clip': self.coco_label_enc,
'coconomask': self.coco_label_enc,
'obj365': self.obj365_label_enc,
'lvis': self.obj365_label_enc,
'openimage': self.obj365_label_enc,
'grit': self.obj365_label_enc,
'vg': self.vg_label_enc,
'obj365_clip': self.obj365_label_enc,
'lvis_clip': self.obj365_label_enc,
'openimage_clip': self.obj365_label_enc,
'bdd_det':self.bdd_det,
'bdd_inst':self.bdd_inst,
'bdd_track_seg':self.bdd_inst,
'bdd_track_box':self.bdd_inst,
'sa1b': self.grounding_label_enc,
'sa1b_clip': self.grounding_label_enc,
'grounding': self.grounding_label_enc,
'rvos': self.grounding_label_enc,
'uvo_video':self.uvo_label_enc,
'ytvis19':self.ytvis19_label_enc,
'image_yt19': self.ytvis19_label_enc,
'ytvis21':self.ytvis21_label_enc,
'image_yt21':self.ytvis21_label_enc,
'ovis':self.ovis_label_enc,
'image_o': self.ovis_label_enc,
'burst':self.grounding_label_enc,
'ytbvos':self.grounding_label_enc,
}
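# Task-to-label-embedding lookup: tasks that share a label space reuse the same nn.Embedding
# (e.g. all class-agnostic/grounding-style tasks map to the single-class grounding_label_enc).
# These embeddings presumably provide the content features of the (noised) ground-truth labels
# used as denoising queries, as in MaskDINO.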
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
# init decoder
self.decoder_norm = decoder_norm = nn.LayerNorm(hidden_dim)
decoder_layer = DeformableTransformerDecoderLayer(hidden_dim, dim_feedforward,
dropout, activation,
self.num_feature_levels, nhead, dec_n_points)
self.decoder = TransformerDecoder(decoder_layer, self.num_layers, decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=hidden_dim, query_dim=query_dim,
num_feature_levels=self.num_feature_levels,
dec_layer_share=dec_layer_share,
cross_track_layer = cross_track_layer,
n_levels=self.num_feature_levels, n_heads=nhead, n_points=dec_n_points
)
self.cross_track_layer = cross_track_layer
self.hidden_dim = hidden_dim
self._bbox_embed = _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
box_embed_layerlist = [_bbox_embed for i in range(self.num_layers)] # share box prediction each layer
self.bbox_embed = nn.ModuleList(box_embed_layerlist)
self.decoder.bbox_embed = self.bbox_embed
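# A single box-head MLP (_bbox_embed) is shared by every decoder layer and also handed to the
# decoder itself, enabling layer-by-layer box refinement. Its last layer is zero-initialized,
# so before training the predicted deltas are zero and each layer simply passes the reference
# boxes through unchanged.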
@classmethod
def from_config(cls, cfg, in_channels, lang_encoder, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["lang_encoder"] = lang_encoder
ret["mask_classification"] = mask_classification
ret["dim_projection"] = cfg.MODEL.DIM_PROJ
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MaskDINO.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MaskDINO.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MaskDINO.NHEADS
ret["dim_feedforward"] = cfg.MODEL.MaskDINO.DIM_FEEDFORWARD
ret["dec_layers"] = cfg.MODEL.MaskDINO.DEC_LAYERS
ret["enforce_input_project"] = cfg.MODEL.MaskDINO.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["two_stage"] =cfg.MODEL.MaskDINO.TWO_STAGE
ret["initialize_box_type"] = cfg.MODEL.MaskDINO.INITIALIZE_BOX_TYPE # ['no', 'bitmask', 'mask2box']
ret["dn"]=cfg.MODEL.MaskDINO.DN
ret["noise_scale"] =cfg.MODEL.MaskDINO.DN_NOISE_SCALE
ret["dn_num"] =cfg.MODEL.MaskDINO.DN_NUM
ret["initial_pred"] =cfg.MODEL.MaskDINO.INITIAL_PRED
ret["learn_tgt"] = cfg.MODEL.MaskDINO.LEARN_TGT
ret["total_num_feature_levels"] = cfg.MODEL.SEM_SEG_HEAD.TOTAL_NUM_FEATURE_LEVELS
ret["semantic_ce_loss"] = cfg.MODEL.MaskDINO.TEST.SEMANTIC_ON and cfg.MODEL.MaskDINO.SEMANTIC_CE_LOSS and ~cfg.MODEL.MaskDINO.TEST.PANOPTIC_ON
ret["cross_track_layer"] = cfg.MODEL.CROSS_TRACK
return ret
def prepare_for_dn(self, targets, tgt, refpoint_emb, batch_size,task):
"""
modified from dn-detr. You can refer to dn-detr
https://github.com/IDEA-Research/DN-DETR/blob/main/models/dn_dab_deformable_detr/dn_components.py
for more details
:param dn_args: scalar, noise_scale
:param tgt: original tgt (content) in the matching part
:param refpoint_emb: positional anchor queries in the matching part
:param batch_size: bs
"""
if self.training:
scalar, noise_scale = self.dn_num,self.noise_scale
known = [(torch.ones_like(t['labels'])).cuda() for t in targets]
know_idx = [torch.nonzero(t) for t in known]
known_num = [sum(k) for k in known]
# use a fixed number of dn queries
if max(known_num)>0:
scalar = scalar//(int(max(known_num)))
else:
scalar = 0
if scalar == 0:
input_query_label = None
input_query_bbox = None
attn_mask = None
mask_dict = None
return input_query_label, input_query_bbox, attn_mask, mask_dict
# can be modified to selectively denoise some labels or boxes; also supports known-label prediction
unmask_bbox = unmask_label = torch.cat(known)
labels = torch.cat([t['labels'] for t in targets])
boxes = torch.cat([t['boxes'] for t in targets])
batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])
# known
known_indice = torch.nonzero(unmask_label + unmask_bbox)
known_indice = known_indice.view(-1)
# noise
known_indice = known_indice.repeat(scalar, 1).view(-1)
known_labels = labels.repeat(scalar, 1).view(-1)
known_bid = batch_idx.repeat(scalar, 1).view(-1)
known_bboxs = boxes.repeat(scalar, 1)
known_labels_expaned = known_labels.clone()
known_bbox_expand = known_bboxs.clone()
# noise on the label
if noise_scale > 0:
p = torch.rand_like(known_labels_expaned.float())
chosen_indice = torch.nonzero(p < (noise_scale * 0.5)).view(-1) # half of bbox prob
new_label = torch.randint_like(chosen_indice, 0, self.num_classes[task]) # randomly put a new one here
known_labels_expaned.scatter_(0, chosen_indice, new_label)
if noise_scale > 0:
diff = torch.zeros_like(known_bbox_expand)
diff[:, :2] = known_bbox_expand[:, 2:] / 2
diff[:, 2:] = known_bbox_expand[:, 2:]
known_bbox_expand += torch.mul((torch.rand_like(known_bbox_expand) * 2 - 1.0),
diff).cuda() * noise_scale
known_bbox_expand = known_bbox_expand.clamp(min=0.0, max=1.0)
m = known_labels_expaned.long().to('cuda')
input_label_embed = self.label_enc[task](m)
input_bbox_embed = inverse_sigmoid(known_bbox_expand)
single_pad = int(max(known_num))
pad_size = int(single_pad * scalar)
padding_label = torch.zeros(pad_size, self.hidden_dim).cuda()
padding_bbox = torch.zeros(pad_size, 4).cuda()
if not refpoint_emb is None:
input_query_label = torch.cat([padding_label, tgt], dim=0).repeat(batch_size, 1, 1)
input_query_bbox = torch.cat([padding_bbox, refpoint_emb], dim=0).repeat(batch_size, 1, 1)
else:
input_query_label=padding_label.repeat(batch_size, 1, 1)
input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)
# map
map_known_indice = torch.tensor([]).to('cuda')
if len(known_num):
map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]
map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(scalar)]).long()
if len(known_bid):
input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed
input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed
tgt_size = pad_size + self.num_queries
attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0
# matching queries cannot see the reconstructed (denoising) part
attn_mask[pad_size:, :pad_size] = True
# denoising groups cannot see each other
for i in range(scalar):
if i == 0:
attn_mask[single_pad * i:single_pad * (i + 1), single_pad * (i + 1):pad_size] = True
if i == scalar - 1:
attn_mask[single_pad * i:single_pad * (i + 1), :single_pad * i] = True
else:
attn_mask[single_pad * i:single_pad * (i + 1), single_pad * (i + 1):pad_size] = True
attn_mask[single_pad * i:single_pad * (i + 1), :single_pad * i] = True
mask_dict = {
'known_indice': torch.as_tensor(known_indice).long(),
'batch_idx': torch.as_tensor(batch_idx).long(),
'map_known_indice': torch.as_tensor(map_known_indice).long(),
'known_lbs_bboxes': (known_labels, known_bboxs),
'know_idx': know_idx,
'pad_size': pad_size,
'scalar': scalar,
}
else:
if not refpoint_emb is None:
input_query_label = tgt.repeat(batch_size, 1, 1)
input_query_bbox = refpoint_emb.repeat(batch_size, 1, 1)
else:
input_query_label=None
input_query_bbox=None
attn_mask = None
mask_dict=None
# 100*batch*256
if not input_query_bbox is None:
input_query_label = input_query_label
input_query_bbox = input_query_bbox
return input_query_label,input_query_bbox,attn_mask,mask_dict
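# --- standalone illustrative sketch (not part of the original file) ---
# A minimal, self-contained demo of the blockwise attention mask built in
# prepare_for_dn above, using toy sizes; True entries mean "do not attend".
import torch
single_pad, scalar, num_queries = 3, 2, 4
pad_size = single_pad * scalar
tgt_size = pad_size + num_queries
attn_mask = torch.ones(tgt_size, tgt_size) < 0 # start with all-False (attend everywhere)
attn_mask[pad_size:, :pad_size] = True # matching queries cannot see denoising queries
for i in range(scalar): # denoising groups cannot see each other
    attn_mask[single_pad * i:single_pad * (i + 1), single_pad * (i + 1):pad_size] = True
    attn_mask[single_pad * i:single_pad * (i + 1), :single_pad * i] = True
print(attn_mask.int())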
def dn_post_process(self,outputs_class,outputs_score,outputs_coord,mask_dict,outputs_mask):
"""
post process of dn after output from the transformer
put the dn part in the mask_dict
"""
assert mask_dict['pad_size'] > 0
output_known_class = outputs_class[:, :, :mask_dict['pad_size'], :]
outputs_class = outputs_class[:, :, mask_dict['pad_size']:, :]
output_known_score = outputs_score[:, :, :mask_dict['pad_size'], :]
outputs_score = outputs_score[:, :, mask_dict['pad_size']:, :]
output_known_coord = outputs_coord[:, :, :mask_dict['pad_size'], :]
outputs_coord = outputs_coord[:, :, mask_dict['pad_size']:, :]
if outputs_mask is not None:
output_known_mask = outputs_mask[:, :, :mask_dict['pad_size'], :]
outputs_mask = outputs_mask[:, :, mask_dict['pad_size']:, :]
out = {'pred_logits': output_known_class[-1], 'pred_scores':output_known_score[-1],'pred_boxes': output_known_coord[-1],'pred_masks': output_known_mask[-1]}
out['aux_outputs'] = self._set_aux_loss(output_known_class, output_known_score, output_known_mask, output_known_coord)
mask_dict['output_known_lbs_bboxes']=out
return outputs_class, outputs_score, outputs_coord, outputs_mask
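# --- standalone illustrative sketch (not part of the original file) ---
# dn_post_process above simply slices the first pad_size queries (the denoising
# part) away from the matching part along the query dimension; a toy version:
import torch
pad_size, num_queries, hidden = 6, 4, 8
outputs = torch.randn(3, 2, pad_size + num_queries, hidden) # (layers, batch, queries, dim)
dn_part = outputs[:, :, :pad_size, :]
match_part = outputs[:, :, pad_size:, :]
print(dn_part.shape, match_part.shape) # (3, 2, 6, 8) and (3, 2, 4, 8)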
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
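# --- standalone illustrative sketch (not part of the original file) ---
# get_valid_ratio above measures the fraction of non-padded rows/columns from a
# padding mask (True = padded); a toy example with a 3x4 valid region in a 4x6 map:
import torch
H, W = 4, 6
mask = torch.ones(1, H, W, dtype=torch.bool)
mask[:, :3, :4] = False # only the top-left 3x4 region is valid
valid_ratio_h = torch.sum(~mask[:, :, 0], 1).float() / H
valid_ratio_w = torch.sum(~mask[:, 0, :], 1).float() / W
print(torch.stack([valid_ratio_w, valid_ratio_h], -1)) # tensor([[0.6667, 0.7500]])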
def pred_box(self, reference, hs, ref0=None):
"""
:param reference: reference box coordinates from each decoder layer
:param hs: content
:param ref0: whether there are prediction from the first layer
"""
device = reference[0].device
if ref0 is None:
outputs_coord_list = []
else:
outputs_coord_list = [ref0.to(device)]
for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)):
layer_delta_unsig = layer_bbox_embed(layer_hs).to(device)
layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig).to(device)
layer_outputs_unsig = layer_outputs_unsig.sigmoid()
outputs_coord_list.append(layer_outputs_unsig)
outputs_coord_list = torch.stack(outputs_coord_list)
return outputs_coord_list
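# --- standalone illustrative sketch (not part of the original file) ---
# pred_box above adds each layer's box delta to the reference box in logit space
# and squashes back with sigmoid; a simplified inverse_sigmoid is assumed here
# (the real helper lives elsewhere in the repo).
import torch
def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))
ref = torch.tensor([[0.50, 0.50, 0.20, 0.30]]) # (cx, cy, w, h), all in [0, 1]
delta = torch.tensor([[0.10, -0.20, 0.00, 0.05]]) # predicted offset in logit space
refined = (delta + inverse_sigmoid(ref)).sigmoid()
print(refined) # refined box, still inside [0, 1]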
def forward(self, x, mask_features, extra, task, masks, targets=None):
"""
:param x: input, a list of multi-scale feature
:param mask_features: is the per-pixel embeddings with resolution 1/4 of the original image,
obtained by fusing backbone encoder encoded features. This is used to produce binary masks.
:param masks: mask in the original image
:param targets: used for denoising training
"""
if 'spatial_query_pos_mask' in extra:
visual_P = True
else:
visual_P = False
assert len(x) == self.num_feature_levels
device = x[0].device
size_list = []
# disable mask, it does not affect performance
enable_mask = 0
if masks is not None:
for src in x:
if src.size(2) % 32 or src.size(3) % 32:
enable_mask = 1
if enable_mask == 0:
masks = [torch.zeros((src.size(0), src.size(2), src.size(3)), device=src.device, dtype=torch.bool) for src in x]
src_flatten = []
mask_flatten = []
spatial_shapes = []
for i in range(self.num_feature_levels):
idx=self.num_feature_levels-1-i
bs, c , h, w=x[idx].shape
size_list.append(x[i].shape[-2:])
spatial_shapes.append(x[idx].shape[-2:])
src_flatten.append(self.input_proj[idx](x[idx]).flatten(2).transpose(1, 2))
mask_flatten.append(masks[i].flatten(1))
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
predictions_federate = []
predictions_score = []
predictions_class = []
predictions_mask = []
if self.two_stage: | output_memory, output_proposals = gen_encoder_output_proposals(src_flatten, mask_flatten, spatial_shapes) | 3 | 2023-12-15 01:12:36+00:00 | 12k |
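# --- standalone illustrative sketch (not part of the original file) ---
# The decoder forward in the row above flattens multi-scale features into one
# token sequence; level_start_index records where each feature level starts in
# that flattened sequence. Toy shapes only.
import torch
spatial_shapes = torch.tensor([[2, 3], [4, 6]]) # (H, W) of two toy feature levels
level_start_index = torch.cat(
    (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
print(level_start_index) # tensor([0, 6]): level 1 starts at token 6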
SHI-Labs/VCoder | vcoder_llava/train/vcoder_it.py | [
{
"identifier": "IGNORE_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "DEFAULT_SEG_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_SEG_TOKEN = \"<seg>\""
},
{
"identifier": "LLaVATrainer",
"path": "vcoder_llava/train/llava_trainer.py",
"snippet": "class LLaVATrainer(Trainer):\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:\n if self.train_dataset is None or not has_length(self.train_dataset):\n return None\n\n if self.args.group_by_modality_length:\n lengths = self.train_dataset.modality_lengths\n return LengthGroupedSampler(\n self.args.train_batch_size,\n world_size=self.args.world_size * self.args.gradient_accumulation_steps,\n lengths=lengths,\n group_by_modality=True,\n )\n else:\n return super()._get_train_sampler()\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n \"\"\"\n if is_sagemaker_mp_enabled():\n return super().create_optimizer()\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n return super().create_optimizer()\n\n opt_model = self.model\n\n if self.optimizer is None:\n decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n if self.args.mm_projector_lr is not None:\n projector_parameters = [name for name, _ in opt_model.named_parameters() if \"mm_projector\" in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n \"lr\": self.args.mm_projector_lr,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n \"lr\": self.args.mm_projector_lr,\n },\n ]\n else:\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)\n\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in opt_model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())\n logger.info(f\"skipped {module}: {skipped/2**20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n logger.debug(f\"bitsandbytes: will optimize {module} in fp32\")\n logger.info(f\"skipped: {skipped/2**20}M params\")\n\n return self.optimizer\n\n def _save_checkpoint(self, model, trial, metrics=None):\n if 
getattr(self.args, 'tune_mm_mlp_adapter', False):\n from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n run_dir = self._get_output_dir(trial=trial)\n output_dir = os.path.join(run_dir, checkpoint_folder)\n\n # Only save Adapter\n keys_to_match = ['mm_projector', 'vision_resampler']\n if getattr(self.args, \"use_im_start_end\", False):\n keys_to_match.extend(['embed_tokens', 'embed_in'])\n\n weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)\n\n if self.args.local_rank == 0 or self.args.local_rank == -1:\n self.model.config.save_pretrained(output_dir)\n torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))\n else:\n super(LLaVATrainer, self)._save_checkpoint(model, trial, metrics)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n if getattr(self.args, 'tune_mm_mlp_adapter', False):\n pass\n else:\n super(LLaVATrainer, self)._save(output_dir, state_dict)"
},
{
"identifier": "vcoder_conversation",
"path": "vcoder_llava/vcoder_conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass VCoderConversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_segs(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_depths(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "tokenizer_image_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "tokenizer_seg_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_seg_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, return_tensors=None): \n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<seg>\\n<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n \n for x in insert_separator(prompt_chunks, [seg_token_index, image_token_index] * (offset + 1)):\n if seg_token_index in x:\n input_ids.extend(x[offset:-1])\n else:\n input_ids.extend(x[offset:])\n \n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "get_peft_state_maybe_zero_3",
"path": "vcoder_llava/train/train.py",
"snippet": "def get_peft_state_maybe_zero_3(named_params, bias):\n if bias == \"none\":\n to_return = {k: t for k, t in named_params if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: t for k, t in named_params if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n maybe_lora_bias = {}\n lora_bias_names = set()\n for k, t in named_params:\n if \"lora_\" in k:\n to_return[k] = t\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n lora_bias_names.add(bias_name)\n elif \"bias\" in k:\n maybe_lora_bias[k] = t\n for k, t in maybe_lora_bias:\n if bias_name in lora_bias_names:\n to_return[bias_name] = t\n else:\n raise NotImplementedError\n to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}\n return to_return"
},
{
"identifier": "get_peft_state_non_lora_maybe_zero_3",
"path": "vcoder_llava/train/train.py",
"snippet": "def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):\n to_return = {k: t for k, t in named_params if \"lora_\" not in k}\n if require_grad_only:\n to_return = {k: t for k, t in to_return.items() if t.requires_grad}\n to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}\n return to_return"
},
{
"identifier": "find_all_linear_names",
"path": "vcoder_llava/train/train.py",
"snippet": "def find_all_linear_names(model):\n cls = torch.nn.Linear\n lora_module_names = set()\n multimodal_keywords = ['mm_projector', 'vision_tower', 'vision_resampler']\n for name, module in model.named_modules():\n if any(mm_keyword in name for mm_keyword in multimodal_keywords):\n continue\n if isinstance(module, cls):\n names = name.split('.')\n lora_module_names.add(names[0] if len(names) == 1 else names[-1])\n\n if 'lm_head' in lora_module_names: # needed for 16-bit\n lora_module_names.remove('lm_head')\n return list(lora_module_names)"
},
{
"identifier": "SEMANTIC_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "SEMANTIC_QUESTIONS = [\n \"What objects can be seen in the image? Perceive as done for semantic segmentation.\",\n \"What items are depicted in the picture? Consider in terms of semantic segmentation.\",\n \"Which elements are present in the visual? Analyze as you would for semantic segmentation.\",\n \"Can you identify the objects in the image? Think from a semantic segmentation perspective.\",\n \"What are the components visible in the graphic? Examine as if segmenting semantically.\",\n \"Which entities can be spotted in the photo? View through the lens of semantic segmentation.\",\n \"What are the discernible objects in the snapshot? Envision in relation to semantic segmentation.\",\n \"What elements stand out in the illustration? Reflect upon it as for semantic segmentation.\",\n \"Can you spot any items within the visual representation? Contemplate in a semantic segmentation context.\",\n \"What features are evident in this visual content? Analyze with semantic segmentation in mind.\",\n \"Which objects are noticeable in the image? Think of it in terms of semantic layers.\",\n \"How would you categorize the objects in this picture? As if you're doing semantic segmentation.\",\n \"What constituents can you recognize in the image? Ponder considering semantic segmentation.\",\n \"Which components can be distinguished in the photo? Evaluate as per semantic segmentation guidelines.\",\n \"What items in the image can you point out? Interpret with a semantic segmentation approach.\",\n \"Can you enumerate the objects present in this visual? Think semantically.\",\n \"What do you observe in the graphic? Consider its semantic segments.\",\n \"How many distinct objects can you identify in the visual? Keeping semantic segmentation in perspective.\",\n \"Which items are apparent in this depiction? Assess as one would for semantic segmentation.\",\n \"What are the visible entities within this image? Delve into it semantically.\",\n \"Can you discern specific objects in the portrayal? Approach it from a semantic segmentation standpoint.\",\n]"
},
{
"identifier": "INSTANCE_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "INSTANCE_QUESTIONS = [\n \"What objects can be seen in the image? Perceive as done for instance segmentation\",\n \"What items are visible in the picture? Analyze as you would for instance segmentation.\",\n \"Which elements are present in the visual? Consider from an instance segmentation perspective.\",\n \"What are the distinguishable objects in the image? Think in terms of instance segmentation.\",\n \"Can you identify the entities in the graphic? Approach it with instance segmentation in mind.\",\n \"What components are apparent in the photo? Examine as if performing instance segmentation.\",\n \"Which items can be detected in the snapshot? View it through the lens of instance segmentation.\",\n \"What features stand out in the illustration? Reflect upon it as for instance segmentation.\",\n \"How would you describe the objects in this image? Keeping instance segmentation as a reference.\",\n \"What constituents are evident in the visual content? Think from an instance segmentation standpoint.\",\n \"Which objects can you spot in the depiction? Evaluate as per instance segmentation guidelines.\",\n \"What do you observe in the graphic? Contemplate with instance segmentation considerations.\",\n \"Can you discern specific entities in the visual? Approach it in the context of instance segmentation.\",\n \"Which components in the image catch your eye? Think of it in relation to instance layers.\",\n \"How many distinct items can you pinpoint in the photo? With an instance segmentation approach.\",\n \"What elements are noticeable in this portrayal? Analyze while considering instance segmentation.\",\n \"Can you list the objects present in the visual representation? Reflecting on instance segmentation.\",\n \"What items in the snapshot can you recognize? Interpret with an instance segmentation perspective.\",\n \"Which entities are discernible in this depiction? Delve into it from an instance segmentation angle.\",\n \"What are the components you can spot within the image? Think instance-wise.\",\n \"Can you detail the objects in the visual? Assess as one would for instance segmentation.\",\n]"
},
{
"identifier": "PANOPTIC_QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "PANOPTIC_QUESTIONS = [\n \"What objects can be seen in the image? Perceive as done for panoptic segmentation\",\n \"What items are evident in the picture? Analyze with a panoptic segmentation perspective.\",\n \"Which elements emerge in the visual? Think in terms of panoptic segmentation.\",\n \"What are the discernible objects in the graphic? Approach it from a panoptic segmentation viewpoint.\",\n \"Can you identify the entities within the image? Consider it as you would for panoptic segmentation.\",\n \"What components stand out in the photo? Examine with panoptic segmentation in mind.\",\n \"Which items are detectable in the snapshot? Reflect upon it with panoptic segmentation considerations.\",\n \"What features can be observed in the illustration? View through the lens of panoptic segmentation.\",\n \"How would you describe the objects in this depiction? Keeping panoptic segmentation as a reference.\",\n \"What constituents are visible in the visual content? Think from a panoptic segmentation standpoint.\",\n \"Which objects can you pinpoint in the image? Evaluate as per panoptic segmentation guidelines.\",\n \"What do you perceive in the graphic? Delve into it with panoptic segmentation insights.\",\n \"Can you spot specific components in the visual? Contextualize with panoptic segmentation.\",\n \"What items in the portrayal catch your attention? Think in relation to panoptic layers.\",\n \"How many distinct entities can you recognize in the photo? With a panoptic segmentation approach.\",\n \"What elements are present in this visual? Analyze while keeping panoptic segmentation in mind.\",\n \"Can you list the objects depicted in the visual representation? Reflecting on panoptic segmentation.\",\n \"Which features in the image can you discern? Interpret considering panoptic segmentation.\",\n \"What are the components evident in this depiction? Approach it using a panoptic segmentation angle.\",\n \"What items can you detect in the visual content? Think panoptically.\",\n \"Can you detail the entities present in the image? Assess as one would when considering panoptic segmentation.\",\n]"
}
] | import os
import copy
import pathlib
import numpy as np
import random
import torch
import transformers
import json
import re
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence
from vcoder_llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_SEG_TOKEN
from torch.utils.data import Dataset
from vcoder_llava.train.llava_trainer import LLaVATrainer
from vcoder_llava import vcoder_conversation as conversation_lib
from vcoder_llava.model import *
from vcoder_llava.mm_utils import tokenizer_image_token, tokenizer_seg_token
from .train import (
get_peft_state_maybe_zero_3,
get_peft_state_non_lora_maybe_zero_3,
find_all_linear_names,
)
from vcoder_llava.questions import SEMANTIC_QUESTIONS, INSTANCE_QUESTIONS, PANOPTIC_QUESTIONS
from PIL import Image
from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import LoraLayer | 7,214 | word_found = True
else:
# Remove any preceding punctuation if it's just before this word
if i > 0 and tokens[i-1] in {',', '.'}:
result_tokens.pop()
else:
result_tokens.append(token)
# Join tokens and clean up spaces before punctuation
result_text = ' '.join(result_tokens)
result_text = re.sub(r'\s([,.](?:\s|$))', r'\1', result_text)
return result_text
with open(file_path) as f:
lines = f.readlines()
seg_labels = {}
for line in lines:
key = line.split("<IMG>")[1].strip("\n")
label = line.split("<IMG>")[2].strip("\n")
label = _remove_specific_word(label, "wall")
label = _remove_specific_word(label, "window")
seg_labels[key] = label
return seg_labels
def obtain_seg_data_splits(data_args):
def _get_labels(folder):
return _obtain_seg_texts(os.path.join(data_args.seg_image_folder, folder, "panoptic.txt"))
list_data_dict = []
data_dict = json.load(open(data_args.data_path, "r"))
for l in data_dict:
if "image" in l.keys():
if os.path.exists(os.path.join(data_args.image_folder, l["image"])):
prob_add_seg = np.random.uniform(0,1.)
if prob_add_seg > 0.5:
l["seg"] = l["image"].split("/")[-1]
if "coco" in l["image"]:
l["seg_folder"] = "coco_segm_text/train/panoptic_inference"
elif "gqa" in l["image"]:
l["seg_folder"] = "gqa/seg_images/panoptic_inference"
elif "VG_100K_2" in l["image"]:
l["seg_folder"] = "vg/vg/SEG_VG_100K_2/panoptic_inference"
elif "VG_100K" in l["image"]:
l["seg_folder"] = "vg/vg/SEG_VG_100K/panoptic_inference"
elif "ocr_vqa" in l["image"]:
l["seg_folder"] = "ocr_vqa/seg_images/panoptic_inference"
if "textvqa" in l["image"]:
l["seg_folder"] = "textvqa/seg_images/panoptic_inference"
conversations = []
for c in l["conversations"]:
if "<image>" in c["value"]:
c["value"] = c["value"].replace("<image>", "<image>\n<seg>")
conversations.append(c)
l["conversations"] = conversations
list_data_dict.append(l)
else:
list_data_dict.append(l)
labels_dict = {
"coco_segm_text/train": _get_labels("coco_segm_text/train/"),
"gqa/seg_images": _get_labels("gqa/seg_images/"),
"vg/vg/SEG_VG_100K": _get_labels("vg/vg/SEG_VG_100K/"),
"vg/vg/SEG_VG_100K_2": _get_labels("vg/vg/SEG_VG_100K_2/"),
"ocr_vqa/seg_images": _get_labels("ocr_vqa/seg_images"),
"textvqa/seg_images": _get_labels("textvqa/seg_images/"),
}
final_list_data_dict = []
for l in list_data_dict:
if "seg" in l.keys():
prob_add = np.random.uniform(0,1.)
if prob_add > 0.7:
labels = labels_dict[l["seg_folder"].split("/panoptic_inference")[0]]
conversations = l["conversations"]
even_indices = list(range(2, len(conversations) + 1, 2))
random_even_index = random.choice(even_indices)
question_prob = np.random.uniform(0,1.)
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(PANOPTIC_QUESTIONS)
conv = [{
"from": "human",
"value": question
},
{
"from": "gpt",
"value": labels[l["seg"]]
}]
final_conversations = conversations[:random_even_index] + conv + conversations[random_even_index:]
l["conversations"] = final_conversations
final_list_data_dict.append(l)
return final_list_data_dict
def get_object_data_split(data_args):
list_data_dict = []
for bucket in ["train", "unlabeled", "test"]:
panoptic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "panoptic.txt"))
semantic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "semantic.txt"))
instance_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "instance.txt"))
for key in panoptic_labels.keys():
assert key in semantic_labels.keys() and key in instance_labels.keys(), "Instance, semantic, and panoptic labels should have the same keys."
prob_task = np.random.uniform(0,1.)
question_prob = np.random.uniform(0,1.)
if prob_task < 0.33:
answer = semantic_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(SEMANTIC_QUESTIONS)
seg_folder = "semantic_inference"
elif prob_task < 0.66:
answer = instance_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
| # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
local_rank = None
def rank0_print(*args):
if local_rank == 0:
print(*args)
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
version: Optional[str] = field(default="v0")
freeze_backbone: bool = field(default=False)
tune_mm_mlp_adapter: bool = field(default=False)
vision_tower: Optional[str] = field(default=None)
mm_vision_select_layer: Optional[int] = field(default=-2) # default to the last layer
mm_projector_type: Optional[str] = field(default='linear')
pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
use_mm2_proj: bool = field(default=False)
pretrain_mm2_mlp_adapter: Optional[str] = field(default=None)
seg_tune_adapter: bool = field(default=False)
mm_seg_select_layer: Optional[int] = field(default=-2) # default to the last layer
seg_mm_projector_type: Optional[str] = field(default='linear')
mm_vision_select_feature: Optional[str] = field(default="patch")
mm_seg_select_feature: Optional[str] = field(default="patch")
@dataclass
class DataArguments:
data_path: str = field(default=None,
metadata={"help": "Path to the training data."})
seg_data_path: str = field(default=None,
metadata={"help": "Path to the seg training data."})
lazy_preprocess: bool = False
is_multimodal: bool = False
image_folder: Optional[str] = field(default=None)
seg_image_folder: Optional[str] = field(default=None)
image_aspect_ratio: str = 'square'
image_grid_pinpoints: Optional[str] = field(default=None)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
remove_unused_columns: bool = field(default=False)
freeze_mm_mlp_adapter: bool = field(default=False)
freeze_seg_mm_mlp_adapter: bool = field(default=False)
mpt_attn_impl: Optional[str] = field(default="triton")
model_max_length: int = field(
default=512,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
double_quant: bool = field(
default=True,
metadata={"help": "Compress the quantization statistics through double quantization."}
)
quant_type: str = field(
default="nf4",
metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
)
bits: int = field(
default=16,
metadata={"help": "How many bits to use."}
)
lora_enable: bool = False
lora_r: int = 64
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_weight_path: str = ""
lora_bias: str = "none"
mm_projector_lr: Optional[float] = None
group_by_modality_length: bool = field(default=False)
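# --- standalone illustrative sketch (not part of the original file) ---
# Dataclass argument groups like the ones above are typically consumed with
# transformers.HfArgumentParser; a toy dataclass is used here so the snippet
# stays self-contained (the field names below are hypothetical, not the real flags).
import transformers
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ToyArguments:
    data_path: Optional[str] = field(default=None)
    lora_enable: bool = False
toy_parser = transformers.HfArgumentParser(ToyArguments)
(toy_args,) = toy_parser.parse_args_into_dataclasses(args=["--data_path", "train.json"])
print(toy_args.data_path, toy_args.lora_enable) # train.json False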
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
if trainer.deepspeed:
torch.cuda.synchronize()
trainer.save_model(output_dir)
return
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
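# --- standalone illustrative sketch (not part of the original file) ---
# safe_save_model_for_hf_trainer above copies every tensor to CPU before saving;
# the same idea on a tiny model:
import os
import tempfile
import torch
import torch.nn as nn
toy_model = nn.Linear(4, 2)
cpu_state_dict = {k: v.cpu() for k, v in toy_model.state_dict().items()}
toy_path = os.path.join(tempfile.mkdtemp(), "toy_model.bin")
torch.save(cpu_state_dict, toy_path)
print(sorted(cpu_state_dict.keys())) # ['bias', 'weight']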
def vcoder_preprocess_v1(
sources,
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False,
has_seg: bool = False
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
if has_image and has_seg:
input_ids = torch.stack([tokenizer_seg_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
elif has_image:
input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
else:
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.TWO
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
if has_image and has_seg:
round_len = len(tokenizer_seg_token(rou, tokenizer))
instruction_len = len(tokenizer_seg_token(parts[0], tokenizer)) - 2
elif has_image:
round_len = len(tokenizer_image_token(rou, tokenizer))
instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
else:
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
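# --- standalone illustrative sketch (not part of the original file) ---
# vcoder_preprocess_v1 above masks instruction tokens with IGNORE_INDEX (-100) so
# that only assistant tokens contribute to the loss; cross_entropy skips them:
import torch
import torch.nn.functional as F
IGNORE_INDEX = -100
toy_logits = torch.randn(4, 10) # 4 positions, 10-way vocabulary
toy_labels = torch.tensor([IGNORE_INDEX, IGNORE_INDEX, 3, 7])
toy_loss = F.cross_entropy(toy_logits, toy_labels, ignore_index=IGNORE_INDEX)
print(toy_loss) # averaged over the two unmasked positions only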
def vcoder_preprocess_multimodal(
sources: Sequence[str],
data_args: DataArguments
) -> Dict:
is_multimodal = data_args.is_multimodal
if not is_multimodal:
return sources
for source in sources:
for sentence in source:
if DEFAULT_IMAGE_TOKEN in sentence['value']:
sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value']
sentence['value'] = sentence['value'].strip()
replace_token = DEFAULT_IMAGE_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)
if DEFAULT_SEG_TOKEN in sentence['value']:
sentence['value'] = sentence['value'].replace(DEFAULT_SEG_TOKEN, '').strip()
sentence['value'] = DEFAULT_SEG_TOKEN + '\n' + sentence['value']
sentence['value'] = sentence['value'].strip()
replace_token = DEFAULT_SEG_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_SEG_TOKEN, replace_token)
return sources
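# --- standalone illustrative sketch (not part of the original file) ---
# After vcoder_preprocess_multimodal above moves <image>/<seg> to the front of the
# prompt, the tokenizer helpers splice a sentinel id at that position; a toy
# character-level tokenizer (hypothetical) is used to keep this runnable.
IMAGE_TOKEN_INDEX = -200 # sentinel id, as in LLaVA-style code
def toy_tokenize(chunk):
    return [ord(c) for c in chunk]
toy_prompt = "<image>\ndescribe the picture"
toy_chunks = [toy_tokenize(c) for c in toy_prompt.split("<image>")]
toy_input_ids = toy_chunks[0] + [IMAGE_TOKEN_INDEX] + toy_chunks[1]
print(toy_input_ids[:3]) # the placeholder id sits exactly where <image> was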
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False,
has_seg: bool = False
) -> Dict:
"""
Given a list of sources, each is a conversation list. This transform:
1. Add signal '### ' at the beginning of each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
if conversation_lib.default_conversation.version.startswith("v1"):
return vcoder_preprocess_v1(sources, tokenizer, has_image=has_image, has_seg=has_seg)
raise ValueError(f"Unknown conversation version: {conversation_lib.default_conversation.version}")
def _obtain_seg_texts(file_path):
def _remove_specific_word(text, word_to_remove):
tokens = re.findall(r'\b\w+\b|[,.]', text)
result_tokens = []
word_found = False
for i, token in enumerate(tokens):
if token == word_to_remove:
if not word_found:
# Keep the first occurrence and mark it as found
result_tokens.append(token)
word_found = True
else:
# Remove any preceding punctuation if it's just before this word
if i > 0 and tokens[i-1] in {',', '.'}:
result_tokens.pop()
else:
result_tokens.append(token)
# Join tokens and clean up spaces before punctuation
result_text = ' '.join(result_tokens)
result_text = re.sub(r'\s([,.](?:\s|$))', r'\1', result_text)
return result_text
with open(file_path) as f:
lines = f.readlines()
seg_labels = {}
for line in lines:
key = line.split("<IMG>")[1].strip("\n")
label = line.split("<IMG>")[2].strip("\n")
label = _remove_specific_word(label, "wall")
label = _remove_specific_word(label, "window")
seg_labels[key] = label
return seg_labels
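# --- standalone illustrative sketch (not part of the original file) ---
# _obtain_seg_texts above keeps only the first mention of noisy words such as
# "wall"; a simplified re-implementation (illustration only, not the exact helper):
import re
def keep_first_occurrence(text, word):
    tokens = re.findall(r"\b\w+\b|[,.]", text)
    out, seen = [], False
    for tok in tokens:
        if tok == word:
            if not seen:
                out.append(tok)
                seen = True
        else:
            out.append(tok)
    return re.sub(r"\s([,.](?:\s|$))", r"\1", " ".join(out))
print(keep_first_occurrence("a wall, a person, a wall, a window", "wall"))
# -> "a wall, a person, a, a window"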
def obtain_seg_data_splits(data_args):
def _get_labels(folder):
return _obtain_seg_texts(os.path.join(data_args.seg_image_folder, folder, "panoptic.txt"))
list_data_dict = []
data_dict = json.load(open(data_args.data_path, "r"))
for l in data_dict:
if "image" in l.keys():
if os.path.exists(os.path.join(data_args.image_folder, l["image"])):
prob_add_seg = np.random.uniform(0,1.)
if prob_add_seg > 0.5:
l["seg"] = l["image"].split("/")[-1]
if "coco" in l["image"]:
l["seg_folder"] = "coco_segm_text/train/panoptic_inference"
elif "gqa" in l["image"]:
l["seg_folder"] = "gqa/seg_images/panoptic_inference"
elif "VG_100K_2" in l["image"]:
l["seg_folder"] = "vg/vg/SEG_VG_100K_2/panoptic_inference"
elif "VG_100K" in l["image"]:
l["seg_folder"] = "vg/vg/SEG_VG_100K/panoptic_inference"
elif "ocr_vqa" in l["image"]:
l["seg_folder"] = "ocr_vqa/seg_images/panoptic_inference"
if "textvqa" in l["image"]:
l["seg_folder"] = "textvqa/seg_images/panoptic_inference"
conversations = []
for c in l["conversations"]:
if "<image>" in c["value"]:
c["value"] = c["value"].replace("<image>", "<image>\n<seg>")
conversations.append(c)
l["conversations"] = conversations
list_data_dict.append(l)
else:
list_data_dict.append(l)
labels_dict = {
"coco_segm_text/train": _get_labels("coco_segm_text/train/"),
"gqa/seg_images": _get_labels("gqa/seg_images/"),
"vg/vg/SEG_VG_100K": _get_labels("vg/vg/SEG_VG_100K/"),
"vg/vg/SEG_VG_100K_2": _get_labels("vg/vg/SEG_VG_100K_2/"),
"ocr_vqa/seg_images": _get_labels("ocr_vqa/seg_images"),
"textvqa/seg_images": _get_labels("textvqa/seg_images/"),
}
final_list_data_dict = []
for l in list_data_dict:
if "seg" in l.keys():
prob_add = np.random.uniform(0,1.)
if prob_add > 0.7:
labels = labels_dict[l["seg_folder"].split("/panoptic_inference")[0]]
conversations = l["conversations"]
even_indices = list(range(2, len(conversations) + 1, 2))
random_even_index = random.choice(even_indices)
question_prob = np.random.uniform(0,1.)
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(PANOPTIC_QUESTIONS)
conv = [{
"from": "human",
"value": question
},
{
"from": "gpt",
"value": labels[l["seg"]]
}]
final_conversations = conversations[:random_even_index] + conv + conversations[random_even_index:]
l["conversations"] = final_conversations
final_list_data_dict.append(l)
return final_list_data_dict
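# --- standalone illustrative sketch (not part of the original file) ---
# obtain_seg_data_splits above injects an extra object-identification Q/A pair at
# a random even index so the human/gpt turn order stays paired; a toy example:
import random
toy_conversations = [
    {"from": "human", "value": "<image>\n<seg>\nWhat is in the photo?"},
    {"from": "gpt", "value": "A dog on a couch."},
    {"from": "human", "value": "What color is the couch?"},
    {"from": "gpt", "value": "Grey."},
]
toy_extra = [
    {"from": "human", "value": "What objects can be seen in the image?"},
    {"from": "gpt", "value": "dog, couch, pillow"},
]
toy_idx = random.choice(list(range(2, len(toy_conversations) + 1, 2)))
toy_merged = toy_conversations[:toy_idx] + toy_extra + toy_conversations[toy_idx:]
print([turn["from"] for turn in toy_merged]) # still alternates human/gpt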
def get_object_data_split(data_args):
list_data_dict = []
for bucket in ["train", "unlabeled", "test"]:
panoptic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "panoptic.txt"))
semantic_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "semantic.txt"))
instance_labels = _obtain_seg_texts(os.path.join(data_args.seg_image_folder, "coco_segm_text", bucket, "instance.txt"))
for key in panoptic_labels.keys():
assert key in semantic_labels.keys() and key in instance_labels.keys(), "Instance, semantic, and panoptic labels should have the same keys."
prob_task = np.random.uniform(0,1.)
question_prob = np.random.uniform(0,1.)
if prob_task < 0.33:
answer = semantic_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else:
question = random.choice(SEMANTIC_QUESTIONS)
seg_folder = "semantic_inference"
elif prob_task < 0.66:
answer = instance_labels[key]
if question_prob > 0.90:
question = "What objects can be seen in the image?"
else: | question = random.choice(INSTANCE_QUESTIONS) | 11 | 2023-12-17 07:46:27+00:00 | 12k |
DeepWok/mase | machop/chop/models/manual/opt_lora/modeling_opt_lora.py | [
{
"identifier": "LoraLayer",
"path": "machop/chop/models/manual/lora_modules.py",
"snippet": "class LoraLayer:\n def __init__(self, in_features: int, out_features: int, **kwargs):\n self.r = {}\n self.lora_alpha = {}\n self.scaling = {}\n self.lora_dropout = nn.ModuleDict({})\n self.lora_A = nn.ModuleDict({})\n self.lora_B = nn.ModuleDict({})\n # For Embedding layer\n self.lora_embedding_A = nn.ParameterDict({})\n self.lora_embedding_B = nn.ParameterDict({})\n # Mark the weight as unmerged\n self.merged = False\n self.disable_adapter = False\n self.in_features = in_features\n self.out_features = out_features\n self.kwargs = kwargs\n init_lora_weights = bool(field(default=True))\n\n def update_layer(\n self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights\n ):\n self.r[adapter_name] = r\n self.lora_alpha[adapter_name] = lora_alpha\n if lora_dropout > 0.0:\n lora_dropout_layer = nn.Dropout(p=lora_dropout)\n else:\n lora_dropout_layer = nn.Identity()\n\n self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))\n # Actual trainable parameters\n if self.disable_adapter == False:\n if r > 0:\n self.lora_A.update(\n nn.ModuleDict(\n {adapter_name: nn.Linear(self.in_features, r, bias=False)}\n )\n )\n self.lora_B.update(\n nn.ModuleDict(\n {adapter_name: nn.Linear(r, self.out_features, bias=False)}\n )\n )\n self.scaling[adapter_name] = lora_alpha / r\n else:\n pass\n\n if init_lora_weights:\n self.reset_lora_parameters(adapter_name)\n self.to(self.weight.device)\n\n def reset_lora_parameters(self, adapter_name):\n if adapter_name in self.lora_A.keys():\n # initialize A the same way as the default for nn.Linear and B to zero\n nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_B[adapter_name].weight)\n if adapter_name in self.lora_embedding_A.keys():\n # initialize a the same way as the default for nn.linear and b to zero\n nn.init.zeros_(self.lora_embedding_A[adapter_name])\n nn.init.normal_(self.lora_embedding_B[adapter_name])"
},
{
"identifier": "LinearLora",
"path": "machop/chop/models/manual/lora_modules.py",
"snippet": "class LinearLora(nn.Linear, LoraLayer):\n # Lora implemented in a dense layer\n def __init__(\n self,\n in_features: int,\n out_features: int,\n config: dict = None,\n **kwargs,\n ):\n self.config = config\n init_lora_weights = self.config.get(\"init_lora_weights\", True)\n\n r, lora_alpha, lora_dropout, adapter_name, disable_adapter = (\n config[\"r\"],\n config[\"lora_alpha\"],\n config[\"lora_dropout\"],\n config[\"adapter_name\"],\n config[\"disable_adapter\"],\n )\n lora_dropout = float(lora_dropout)\n\n nn.Linear.__init__(self, in_features, out_features, **kwargs)\n LoraLayer.__init__(self, in_features=in_features, out_features=out_features)\n # Freezing the pre-trained weight matrix\n self.weight.requires_grad = False\n self.disable_adapter = disable_adapter\n self.fan_in_fan_out = config.get(\"fan_in_fan_out\", False)\n self.is_target_conv_1d_layer = config.get(\"is_target_conv_1d_layer\", False)\n\n if self.fan_in_fan_out:\n self.weight.data = self.weight.data.T\n\n nn.Linear.reset_parameters(self)\n self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)\n self.active_adapter = adapter_name\n self.is_target_conv_1d_layer = self.is_target_conv_1d_layer\n\n def merge(self):\n if self.active_adapter not in self.lora_A.keys():\n return\n if self.merged:\n warnings.warn(\"Already merged. Nothing to do.\")\n return\n if self.r[self.active_adapter] > 0:\n self.weight.data += self.get_delta_weight(self.active_adapter)\n self.merged = True\n\n def unmerge(self):\n if self.active_adapter not in self.lora_A.keys():\n return\n if not self.merged:\n warnings.warn(\"Already unmerged. Nothing to do.\")\n return\n if self.r[self.active_adapter] > 0:\n self.weight.data -= self.get_delta_weight(self.active_adapter)\n self.merged = False\n\n def get_delta_weight(self, adapter):\n return (\n transpose(\n self.lora_B[adapter].weight @ self.lora_A[adapter].weight,\n self.fan_in_fan_out,\n )\n * self.scaling[adapter]\n )\n\n def _linear(self, input: torch.Tensor) -> torch.Tensor:\n return F.linear(\n input, transpose(self.weight, self.fan_in_fan_out), bias=self.bias\n )\n\n def forward(self, x: torch.Tensor):\n previous_dtype = x.dtype\n\n if self.active_adapter not in self.lora_A.keys():\n return self._linear(x)\n\n if self.disable_adapter:\n if self.r[self.active_adapter] > 0 and self.merged:\n self.unmerge()\n result = self._linear(x)\n\n elif self.r[self.active_adapter] == 0 or self.merged:\n result = self._linear(x)\n\n else:\n lora_A = self.lora_A[self.active_adapter]\n lora_B = self.lora_B[self.active_adapter]\n dropout = self.lora_dropout[self.active_adapter]\n scaling = self.scaling[self.active_adapter]\n\n result = self._linear(x)\n x = x.to(lora_A.weight.dtype)\n result += lora_B(lora_A(dropout(x))) * scaling\n\n result = result.to(previous_dtype)\n\n return result\n\n def extract_lora_params(self):\n lora_params = {\n \"lora_A\": self.lora_A[self.active_adapter].state_dict(),\n \"lora_B\": self.lora_B[self.active_adapter].state_dict(),\n }\n\n return lora_params\n\n # Helper function to bias the training towards either the target module or the entire model"
},
{
"identifier": "OPTLoraConfig",
"path": "machop/chop/models/manual/opt_lora/configuration_opt_lora.py",
"snippet": "class OPTLoraConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate a OPT model\n according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the OPT\n [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50272):\n Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`OPTModel`]\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of decoder layers.\n ffn_dim (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer decoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n do_layer_norm_before (`bool`, *optional*, defaults to `True`):\n Whether to perform layer normalization before the attention block.\n word_embed_proj_dim (`int`, *optional*):\n `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to\n `hidden_size`.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n layerdrop: (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability. 
See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more\n details.\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n enable_bias (`bool`, *optional*, defaults to `True`):\n Whether or not if the linear layers in the attention blocks should use the bias term.\n layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`):\n Whether or not if the layer norms should have learnable parameters.\n\n Example:\n\n ```python\n >>> from transformers import OPTConfig, OPTModel\n\n >>> # Initializing a OPT facebook/opt-large style configuration\n >>> configuration = OPTConfig()\n\n >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration\n >>> model = OPTModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"opt\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=50272,\n hidden_size=768,\n num_hidden_layers=12,\n ffn_dim=3072,\n max_position_embeddings=2048,\n do_layer_norm_before=True,\n _remove_final_layer_norm=False,\n word_embed_proj_dim=None,\n dropout=0.1,\n attention_dropout=0.0,\n num_attention_heads=12,\n activation_function=\"relu\",\n layerdrop=0.0,\n init_std=0.02,\n use_cache=False,\n pad_token_id=1,\n bos_token_id=2,\n eos_token_id=2,\n enable_bias=True,\n layer_norm_elementwise_affine=True,\n lora_config: dict = None,\n **kwargs,\n ):\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n **kwargs,\n )\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.num_attention_heads = num_attention_heads\n self.word_embed_proj_dim = (\n word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size\n )\n self.ffn_dim = ffn_dim\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_function = activation_function\n self.init_std = init_std\n self.layerdrop = layerdrop\n self.use_cache = use_cache\n self.do_layer_norm_before = do_layer_norm_before\n # We keep these variables at `True` for backward compatibility.\n self.enable_bias = enable_bias\n self.layer_norm_elementwise_affine = layer_norm_elementwise_affine\n if lora_config is not None:\n lora_config = parse_opt_lora_config(lora_config, num_hidden_layers)\n self.lora_config = lora_config\n\n # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility\n # with checkpoints that have been fine-tuned before transformers v4.20.1\n # see https://github.com/facebookresearch/metaseq/pull/164\n self._remove_final_layer_norm = _remove_final_layer_norm\n\n def __setattr__(self, key, value):\n if key == \"lora_config\" and value is not None:\n value = parse_opt_lora_config(\n config=value, num_hidden_layers=self.num_hidden_layers\n )\n return super().__setattr__(key, value)"
},
{
"identifier": "OPTAttention_attention_get_dtype_min",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attention_get_dtype_min(attn_weights: Tensor) -> Tensor:\n return torch.tensor(torch.finfo(attn_weights.dtype).min)"
},
{
"identifier": "OPTAttention_attention_mask_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attention_mask_shape_check(\n attention_mask: Tensor, bsz: int, tgt_len: int, src_len: int\n) -> bool:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )"
},
{
"identifier": "OPTAttention_attn_output_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attn_output_shape_check(\n attn_output: Tensor, bsz: int, num_heads: int, tgt_len: int, head_dim: int\n) -> bool:\n if attn_output.size() != (bsz * num_heads, tgt_len, head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, num_heads, tgt_len, head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )"
},
{
"identifier": "OPTAttention_attn_weight_dtype_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attn_weight_dtype_check(attn_weights: Tensor) -> bool:\n assert attn_weights.dtype != torch.float16, \"FP16 is not supported for OPTAttention\""
},
{
"identifier": "OPTAttention_attn_weights_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_attn_weights_shape_check(\n attn_weights: Tensor, bsz: int, num_heads: int, tgt_len: int, src_len: int\n) -> bool:\n if attn_weights.size() != (bsz * num_heads, tgt_len, src_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * num_heads, tgt_len, src_len)}, but is\"\n f\" {attn_weights.size()}\"\n )"
},
{
"identifier": "OPTAttention_layer_head_mask_shape_check",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_layer_head_mask_shape_check(\n layer_head_mask: Tensor, num_heads: int\n) -> bool:\n if layer_head_mask.size() != (num_heads,):\n raise ValueError(\n f\"Head mask for a single layer should be of size {(num_heads,)}, but is\"\n f\" {layer_head_mask.size()}\"\n )"
},
{
"identifier": "OPTAttention_reshape_qkv_back_for_bmm",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_reshape_qkv_back_for_bmm(\n query_states: Tensor,\n key_states: Tensor,\n value_states: Tensor,\n proj_shape: int,\n tgt_len: int,\n bsz: int,\n num_heads: int,\n head_dim: int,\n) -> Tuple[Tensor]:\n query_states = OPTAttention_self_shape(\n query_states, tgt_len, bsz, num_heads, head_dim\n ).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n return query_states, key_states, value_states"
},
{
"identifier": "OPTAttention_self_shape",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTAttention_self_shape(\n tensor: Tensor, seq_len: int, bsz: int, num_heads: int, head_dim: int\n) -> Tensor:\n \"\"\"\n reshape and permute the Tensor for matmul\n [B, N, h*d_head] -> [B, N, h, d_head] -> [B, h, N, d_head]\n\n replaces `OPTAttention._shape` method\n \"\"\"\n return tensor.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()"
},
{
"identifier": "OPTDecoder_check_head_mask",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTDecoder_check_head_mask(head_mask, decoder_layers) -> bool:\n for attn_mask, mask_name in zip([head_mask], [\"head_mask\"]):\n if attn_mask is not None:\n if attn_mask.size()[0] != (len(decoder_layers)):\n raise ValueError(\n f\"The `{mask_name}` should be specified for {len(decoder_layers)} layers, but it is for\"\n f\" {head_mask.size()[0]}.\"\n )"
},
{
"identifier": "OPTDecoder_self_prepare_decoder_attention",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTDecoder_self_prepare_decoder_attention(\n attention_mask: Tensor,\n input_shape,\n inputs_embeds: Tensor,\n past_key_values_length: int,\n) -> Tensor:\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(\n input_shape,\n inputs_embeds.dtype,\n past_key_values_length=past_key_values_length,\n ).to(inputs_embeds.device)\n\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n expanded_attn_mask = _expand_mask(\n attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]\n ).to(inputs_embeds.device)\n combined_attention_mask = (\n expanded_attn_mask\n if combined_attention_mask is None\n else expanded_attn_mask + combined_attention_mask\n )\n\n return combined_attention_mask"
},
{
"identifier": "OPTForCasualLM_compute_loss",
"path": "machop/chop/models/manual/opt_lora/utils_opt.py",
"snippet": "def OPTForCasualLM_compute_loss(logits, labels, self_config_vocab_size):\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss = torch.nn.functional.cross_entropy(\n shift_logits.view(-1, self_config_vocab_size), shift_labels.view(-1)\n )\n # loss = self_loss_fct(\n # shift_logits.view(-1, self_config_vocab_size), shift_labels.view(-1)\n # )\n return loss"
}
] | import random
import torch
import torch.utils.checkpoint
from typing import Optional, Tuple, Union
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging, replace_return_docstrings
from ..lora_modules import LoraLayer, LinearLora
from .configuration_opt_lora import OPTLoraConfig
from .utils_opt import (
OPTAttention_attention_get_dtype_min,
OPTAttention_attention_mask_shape_check,
OPTAttention_attn_output_shape_check,
OPTAttention_attn_weight_dtype_check,
OPTAttention_attn_weights_shape_check,
OPTAttention_layer_head_mask_shape_check,
OPTAttention_reshape_qkv_back_for_bmm,
OPTAttention_self_shape,
OPTDecoder_check_head_mask,
OPTDecoder_self_prepare_decoder_attention,
OPTForCasualLM_compute_loss,
) | 7,203 | self.project_out = nn.Linear(
config.hidden_size, config.word_embed_proj_dim, bias=False
)
else:
self.project_out = None
if config.word_embed_proj_dim != config.hidden_size:
self.project_in = nn.Linear(
config.word_embed_proj_dim, config.hidden_size, bias=False
)
else:
self.project_in = None
# Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
if config.do_layer_norm_before and not config._remove_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(
config.hidden_size,
elementwise_affine=config.layer_norm_elementwise_affine,
)
else:
self.final_layer_norm = None
self.layers = nn.ModuleList(
[OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor = None,
head_mask: Optional[torch.Tensor] = None,
# inputs_embeds: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = True,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
"""
return_dict = self.config.return_dict if return_dict is None else return_dict
output_attentions = (
self.config.output_attentions
if output_attentions is None
else output_attentions
)
output_hidden_states = (
self.config.output_hidden_states
if output_hidden_states is None
else output_hidden_states
)
input_shape = input_ids.shape
input_ids = input_ids.view(-1, input_shape[-1])
# input_ids = OPTDecoder_view_input_ids(
# input_ids=input_ids, input_shape=input_shape
# )
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
# TODO: check this?
if attention_mask is None:
attention_mask = torch.ones(
inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device
)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
attention_mask = OPTDecoder_self_prepare_decoder_attention(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
if self.project_in is not None:
inputs_embeds = self.project_in(inputs_embeds)
hidden_states = inputs_embeds + pos_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
| # coding=utf-8
# ----------------------------------------------
# This is a traceable version of OPTModel and OPTForCausalLanguageModeling
# modified code based on HuggingFace's opt
# ----------------------------------------------
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTLoraConfig"
# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/opt-125m",
"facebook/opt-350m",
"facebook/opt-1.3b",
"facebook/opt-2.7b",
"facebook/opt-6.7b",
"facebook/opt-13b",
"facebook/opt-30b",
# See all OPT models at https://huggingface.co/models?filter=opt
]
class OPTLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
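# --- Illustrative sketch (not part of the original file) ---
# OPT derives position ids from the attention mask rather than from raw token
# indices: a cumulative sum over the mask skips padded slots, and a constant
# offset of 2 is added before the embedding lookup (a historical OPT quirk,
# mirrored by `self.offset` above). The helper name below is hypothetical.
def _demo_opt_position_ids(attention_mask: torch.Tensor, offset: int = 2) -> torch.Tensor:
    # e.g. mask [[1, 1, 1, 0]] -> cumsum*mask - 1 = [[0, 1, 2, -1]] -> + offset = [[2, 3, 4, 1]]
    attention_mask = attention_mask.long()
    positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
    return positions + offset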
class OPTAttention(nn.Module):
"""
- FX-traceable Multi-headed attention from 'Attention Is All You Need' paper
- This module includes multi-head (k, q, v linear, attention), concat, and attention output linear
- To make this module traceable, `mode` must be one of integer 0, 1, 2, or 3.
- The default mode `None` (un-traceable mode) can be used for training (testing), but not for modify-sw.
"""
custom_node_leaf_patch = [
("embeddings", "BertEmbeddingsPatched", OPTLearnedPositionalEmbedding)
]
def __init__(
self,
config: OPTLoraConfig,
embed_dim: int,
num_heads: int,
layer_id: int = 0,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = False,
):
super().__init__()
self.config = config
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
lora_config = config.lora_config[f"model_layer_{layer_id}"]["self_attn"]
self.k_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["k_proj"],
)
self.v_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["v_proj"],
)
self.q_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["q_proj"],
)
self.o_proj = LinearLora(
in_features=embed_dim,
out_features=embed_dim,
bias=bias,
config=lora_config["o_proj"],
)
self.lora_config = lora_config
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
# key_value_states: Optional[torch.Tensor] = None,
# past_key_value: Optional[Tuple[torch.Tensor]] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, _ = hidden_states.shape
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# self_attention
# key_value_states is None, past_key_value is None
key_states = OPTAttention_self_shape(
self.k_proj(hidden_states),
seq_len=-1,
bsz=bsz,
num_heads=self.num_heads,
head_dim=self.head_dim,
)
value_states = OPTAttention_self_shape(
self.v_proj(hidden_states),
seq_len=-1,
bsz=bsz,
num_heads=self.num_heads,
head_dim=self.head_dim,
)
# proj_shape = OPTAttention_construct_proj_shape(
# bsz, self.num_heads, self.head_dim
# )
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states, key_states, value_states = OPTAttention_reshape_qkv_back_for_bmm(
query_states,
key_states,
value_states,
proj_shape=proj_shape,
tgt_len=tgt_len,
bsz=bsz,
num_heads=self.num_heads,
head_dim=self.head_dim,
)
src_len = key_states.shape[1]
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
OPTAttention_attn_weights_shape_check(
attn_weights, bsz, self.num_heads, tgt_len, src_len
)
if attention_mask is not None:
OPTAttention_attention_mask_shape_check(
attention_mask, bsz, tgt_len, src_len
)
attn_weights = (
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attention_mask
)
attn_weights = torch.max(
attn_weights, OPTAttention_attention_get_dtype_min(attn_weights)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# Patched OPTAttention does not support FP16
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
OPTAttention_attn_weight_dtype_check(attn_weights)
# *: Currently this model does not support torch.float16
# if attn_weights.dtype == torch.float16:
# attn_weights = nn.functional.softmax(
# attn_weights, dim=-1, dtype=torch.float32
# ).to(torch.float16)
# else:
# attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
OPTAttention_layer_head_mask_shape_check(layer_head_mask, self.num_heads)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights_reshaped.view(
bsz * self.num_heads, tgt_len, src_len
)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
)
attn_output = torch.bmm(attn_probs, value_states)
OPTAttention_attn_output_shape_check(
attn_output, bsz, self.num_heads, tgt_len, self.head_dim
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights_reshaped
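# --- Shape walk-through (illustrative, not part of the original file) ---
# Assuming embed_dim=768, num_heads=12 (head_dim=64), bsz=2 and tgt_len=src_len=8:
#   q/k/v projections (2, 8, 768) are reshaped to proj_shape (2*12, 8, 64) = (24, 8, 64),
#   attn_weights = bmm(q, k^T) has shape (24, 8, 8),
#   attn_output = bmm(softmax(attn_weights), v) has shape (24, 8, 64),
#   and the final view/transpose/reshape restores (2, 8, 768) before `o_proj`.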
class OPTDecoderLayer(nn.Module):
def __init__(self, config: OPTLoraConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = OPTAttention(
config=config,
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=config.enable_bias,
)
self.do_layer_norm_before = config.do_layer_norm_before
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.self_attn_layer_norm = nn.LayerNorm(
self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine
)
self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=config.enable_bias)
self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=config.enable_bias)
self.final_layer_norm = nn.LayerNorm(
self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# *: key_value_states is always None
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
# past_key_value=None,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
# key_value_states=None,
)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = residual + hidden_states
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Fully Connected
hidden_states_shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = (residual + hidden_states).view(hidden_states_shape)
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
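# --- Note (illustrative, not part of the original file) ---
# `config.do_layer_norm_before` selects between the two OPT layer-norm layouts:
# True gives the pre-LN ordering (layer norm before self-attention and before the
# MLP, used by the 125m/1.3B/.../175B checkpoints), while e.g.
# `do_layer_norm_before=False` reproduces the post-LN layout of facebook/opt-350m,
# where the norms are applied after the residual additions instead.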
class OPTPreTrainedModel(PreTrainedModel):
config_class = OPTLoraConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["OPTDecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, OPTDecoder):
module.gradient_checkpointing = value
class OPTDecoder(OPTPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`]
Args:
config: OPTConfig
"""
custom_node_leaf_patch = [
(
"embed_positions",
"OPTLearnedPositionalEmbedding",
OPTLearnedPositionalEmbedding,
)
]
def __init__(self, config: OPTLoraConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(
config.vocab_size, config.word_embed_proj_dim, self.padding_idx
)
self.embed_positions = OPTLearnedPositionalEmbedding(
config.max_position_embeddings, config.hidden_size
)
if config.word_embed_proj_dim != config.hidden_size:
self.project_out = nn.Linear(
config.hidden_size, config.word_embed_proj_dim, bias=False
)
else:
self.project_out = None
if config.word_embed_proj_dim != config.hidden_size:
self.project_in = nn.Linear(
config.word_embed_proj_dim, config.hidden_size, bias=False
)
else:
self.project_in = None
# Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
if config.do_layer_norm_before and not config._remove_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(
config.hidden_size,
elementwise_affine=config.layer_norm_elementwise_affine,
)
else:
self.final_layer_norm = None
self.layers = nn.ModuleList(
[OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor = None,
head_mask: Optional[torch.Tensor] = None,
# inputs_embeds: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = True,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
"""
return_dict = self.config.return_dict if return_dict is None else return_dict
output_attentions = (
self.config.output_attentions
if output_attentions is None
else output_attentions
)
output_hidden_states = (
self.config.output_hidden_states
if output_hidden_states is None
else output_hidden_states
)
input_shape = input_ids.shape
input_ids = input_ids.view(-1, input_shape[-1])
# input_ids = OPTDecoder_view_input_ids(
# input_ids=input_ids, input_shape=input_shape
# )
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
# TODO: check this?
if attention_mask is None:
attention_mask = torch.ones(
inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device
)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
attention_mask = OPTDecoder_self_prepare_decoder_attention(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
if self.project_in is not None:
inputs_embeds = self.project_in(inputs_embeds)
hidden_states = inputs_embeds + pos_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired | OPTDecoder_check_head_mask(head_mask, self.layers) | 11 | 2023-12-18 12:50:53+00:00 | 12k |
byeongjun-park/HarmonyView | ldm/models/diffusion/sync_dreamer.py | [
{
"identifier": "read_pickle",
"path": "ldm/base_utils.py",
"snippet": "def read_pickle(pkl_path):\n with open(pkl_path, 'rb') as f:\n return pickle.load(f)"
},
{
"identifier": "concat_images_list",
"path": "ldm/base_utils.py",
"snippet": "def concat_images_list(*args,vert=False):\n if len(args)==1: return args[0]\n img_out=args[0]\n for img in args[1:]:\n img_out=concat_images(img_out,img,vert)\n return img_out"
},
{
"identifier": "get_warp_coordinates",
"path": "ldm/models/diffusion/sync_dreamer_utils.py",
"snippet": "def get_warp_coordinates(volume_xyz, warp_size, input_size, Ks, warp_pose):\n B, _, D, H, W = volume_xyz.shape\n ratio = warp_size / input_size\n warp_proj = construct_project_matrix(ratio, ratio, Ks, warp_pose) # B,4,4\n warp_coords = project_and_normalize(volume_xyz.view(B,3,D*H*W), warp_proj, warp_size).view(B, D, H, W, 2)\n return warp_coords"
},
{
"identifier": "create_target_volume",
"path": "ldm/models/diffusion/sync_dreamer_utils.py",
"snippet": "def create_target_volume(depth_size, volume_size, input_image_size, pose_target, K, near=None, far=None):\n device, dtype = pose_target.device, pose_target.dtype\n\n # compute a depth range on the unit sphere\n H, W, D, B = volume_size, volume_size, depth_size, pose_target.shape[0]\n if near is not None and far is not None :\n # near, far b,1,h,w\n depth_values = torch.linspace(0, 1, steps=depth_size).to(near.device).to(near.dtype) # d\n depth_values = depth_values.view(1, D, 1, 1) # 1,d,1,1\n depth_values = depth_values * (far - near) + near # b d h w\n depth_values = depth_values.view(B, 1, D, H * W)\n else:\n near, far = near_far_from_unit_sphere_using_camera_poses(pose_target) # b 1\n depth_values = torch.linspace(0, 1, steps=depth_size).to(near.device).to(near.dtype) # d\n depth_values = depth_values[None,:,None] * (far[:,None,:] - near[:,None,:]) + near[:,None,:] # b d 1\n depth_values = depth_values.view(B, 1, D, 1).expand(B, 1, D, H*W)\n\n ratio = volume_size / input_image_size\n\n # creat a grid on the target (reference) view\n # H, W, D, B = volume_size, volume_size, depth_values.shape[1], depth_values.shape[0]\n\n # creat mesh grid: note reference also means target\n ref_grid = create_meshgrid(H, W, normalized_coordinates=False) # (1, H, W, 2)\n ref_grid = ref_grid.to(device).to(dtype)\n ref_grid = ref_grid.permute(0, 3, 1, 2) # (1, 2, H, W)\n ref_grid = ref_grid.reshape(1, 2, H*W) # (1, 2, H*W)\n ref_grid = ref_grid.expand(B, -1, -1) # (B, 2, H*W)\n ref_grid = torch.cat((ref_grid, torch.ones(B, 1, H*W, dtype=ref_grid.dtype, device=ref_grid.device)), dim=1) # (B, 3, H*W)\n ref_grid = ref_grid.unsqueeze(2) * depth_values # (B, 3, D, H*W)\n\n # unproject to space and transfer to world coordinates.\n Ks = K\n ref_proj = construct_project_matrix(ratio, ratio, Ks, pose_target) # B,4,4\n ref_proj_inv = torch.inverse(ref_proj) # B,4,4\n ref_grid = ref_proj_inv[:,:3,:3] @ ref_grid.view(B,3,D*H*W) + ref_proj_inv[:,:3,3:] # B,3,3 @ B,3,DHW + B,3,1 => B,3,DHW\n return ref_grid.reshape(B,3,D,H,W), depth_values.view(B,1,D,H,W)"
},
{
"identifier": "NoisyTargetViewEncoder",
"path": "ldm/models/diffusion/sync_dreamer_network.py",
"snippet": "class NoisyTargetViewEncoder(nn.Module):\n def __init__(self, time_embed_dim, viewpoint_dim, run_dim=16, output_dim=8):\n super().__init__()\n\n self.init_conv = nn.Conv2d(4, run_dim, 3, 1, 1)\n self.out_conv0 = Image2DResBlockWithTV(run_dim, time_embed_dim, viewpoint_dim)\n self.out_conv1 = Image2DResBlockWithTV(run_dim, time_embed_dim, viewpoint_dim)\n self.out_conv2 = Image2DResBlockWithTV(run_dim, time_embed_dim, viewpoint_dim)\n self.final_out = nn.Sequential(\n nn.GroupNorm(8, run_dim),\n nn.SiLU(True),\n nn.Conv2d(run_dim, output_dim, 3, 1, 1)\n )\n\n def forward(self, x, t, v):\n B, DT = t.shape\n t = t.view(B, DT, 1, 1)\n B, DV = v.shape\n v = v.view(B, DV, 1, 1)\n\n x = self.init_conv(x)\n x = self.out_conv0(x, t, v)\n x = self.out_conv1(x, t, v)\n x = self.out_conv2(x, t, v)\n x = self.final_out(x)\n return x"
},
{
"identifier": "SpatialTime3DNet",
"path": "ldm/models/diffusion/sync_dreamer_network.py",
"snippet": "class SpatialTime3DNet(nn.Module):\n def __init__(self, time_dim=256, input_dim=128, dims=(32, 64, 128, 256)):\n super().__init__()\n d0, d1, d2, d3 = dims\n dt = time_dim\n\n self.init_conv = nn.Conv3d(input_dim, d0, 3, 1, 1) # 32\n self.conv0 = SpatialTimeBlock(d0, dt, d0, stride=1)\n\n self.conv1 = SpatialTimeBlock(d0, dt, d1, stride=2)\n self.conv2_0 = SpatialTimeBlock(d1, dt, d1, stride=1)\n self.conv2_1 = SpatialTimeBlock(d1, dt, d1, stride=1)\n\n self.conv3 = SpatialTimeBlock(d1, dt, d2, stride=2)\n self.conv4_0 = SpatialTimeBlock(d2, dt, d2, stride=1)\n self.conv4_1 = SpatialTimeBlock(d2, dt, d2, stride=1)\n\n self.conv5 = SpatialTimeBlock(d2, dt, d3, stride=2)\n self.conv6_0 = SpatialTimeBlock(d3, dt, d3, stride=1)\n self.conv6_1 = SpatialTimeBlock(d3, dt, d3, stride=1)\n\n self.conv7 = SpatialUpTimeBlock(d3, dt, d2)\n self.conv8 = SpatialUpTimeBlock(d2, dt, d1)\n self.conv9 = SpatialUpTimeBlock(d1, dt, d0)\n\n def forward(self, x, t):\n B, C = t.shape\n t = t.view(B, C, 1, 1, 1)\n\n x = self.init_conv(x)\n conv0 = self.conv0(x, t)\n\n x = self.conv1(conv0, t)\n x = self.conv2_0(x, t)\n conv2 = self.conv2_1(x, t)\n\n x = self.conv3(conv2, t)\n x = self.conv4_0(x, t)\n conv4 = self.conv4_1(x, t)\n\n x = self.conv5(conv4, t)\n x = self.conv6_0(x, t)\n x = self.conv6_1(x, t)\n\n x = conv4 + self.conv7(x, t)\n x = conv2 + self.conv8(x, t)\n x = conv0 + self.conv9(x, t)\n return x"
},
{
"identifier": "FrustumTV3DNet",
"path": "ldm/models/diffusion/sync_dreamer_network.py",
"snippet": "class FrustumTV3DNet(nn.Module):\n def __init__(self, in_dim, t_dim, v_dim, dims=(32, 64, 128, 256)):\n super().__init__()\n self.conv0 = nn.Conv3d(in_dim, dims[0], 3, 1, 1) # 32\n\n self.conv1 = FrustumTVBlock(dims[0], t_dim, v_dim, dims[1], 2)\n self.conv2 = FrustumTVBlock(dims[1], t_dim, v_dim, dims[1], 1)\n\n self.conv3 = FrustumTVBlock(dims[1], t_dim, v_dim, dims[2], 2)\n self.conv4 = FrustumTVBlock(dims[2], t_dim, v_dim, dims[2], 1)\n\n self.conv5 = FrustumTVBlock(dims[2], t_dim, v_dim, dims[3], 2)\n self.conv6 = FrustumTVBlock(dims[3], t_dim, v_dim, dims[3], 1)\n\n self.up0 = FrustumTVUpBlock(dims[3], t_dim, v_dim, dims[2])\n self.up1 = FrustumTVUpBlock(dims[2], t_dim, v_dim, dims[1])\n self.up2 = FrustumTVUpBlock(dims[1], t_dim, v_dim, dims[0])\n\n def forward(self, x, t, v):\n B,DT = t.shape\n t = t.view(B,DT,1,1,1)\n B,DV = v.shape\n v = v.view(B,DV,1,1,1)\n\n b, _, d, h, w = x.shape\n x0 = self.conv0(x)\n x1 = self.conv2(self.conv1(x0, t, v), t, v)\n x2 = self.conv4(self.conv3(x1, t, v), t, v)\n x3 = self.conv6(self.conv5(x2, t, v), t, v)\n\n x2 = self.up0(x3, t, v) + x2\n x1 = self.up1(x2, t, v) + x1\n x0 = self.up2(x1, t, v) + x0\n return {w: x0, w//2: x1, w//4: x2, w//8: x3}"
},
{
"identifier": "make_ddim_timesteps",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "FrozenCLIPImageEmbedder",
"path": "ldm/modules/encoders/modules.py",
"snippet": "class FrozenCLIPImageEmbedder(AbstractEncoder):\n \"\"\"\n Uses the CLIP image encoder.\n Not actually frozen... If you want that set cond_stage_trainable=False in cfg\n \"\"\"\n def __init__(\n self,\n model='ViT-L/14',\n jit=False,\n device='cpu',\n antialias=False,\n ):\n super().__init__()\n self.model, _ = clip.load(name=model, device=device, jit=jit)\n # We don't use the text part so delete it\n del self.model.transformer\n self.antialias = antialias\n self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)\n self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)\n\n def preprocess(self, x):\n # Expects inputs in the range -1, 1\n x = kornia.geometry.resize(x, (224, 224),\n interpolation='bicubic',align_corners=True,\n antialias=self.antialias)\n x = (x + 1.) / 2.\n # renormalize according to clip\n x = kornia.enhance.normalize(x, self.mean, self.std)\n return x\n\n def forward(self, x):\n # x is assumed to be in range [-1,1]\n if isinstance(x, list):\n # [\"\"] denotes condition dropout for ucg\n device = self.model.visual.conv1.weight.device\n return torch.zeros(1, 768, device=device)\n return self.model.encode_image(self.preprocess(x)).float()\n\n def encode(self, im):\n return self(im).unsqueeze(1)"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | from pathlib import Path
from skimage.io import imsave
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from ldm.base_utils import read_pickle, concat_images_list
from ldm.models.diffusion.sync_dreamer_utils import get_warp_coordinates, create_target_volume
from ldm.models.diffusion.sync_dreamer_network import NoisyTargetViewEncoder, SpatialTime3DNet, FrustumTV3DNet
from ldm.modules.diffusionmodules.util import make_ddim_timesteps, timestep_embedding
from ldm.modules.encoders.modules import FrozenCLIPImageEmbedder
from ldm.util import instantiate_from_config
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np | 7,478 | image_target = batch['target_image'].permute(0, 1, 4, 2, 3) # b,n,3,h,w
N = image_target.shape[1]
x = [self.encode_first_stage(image_target[:,ni], True) for ni in range(N)]
x = torch.stack(x, 1) # b,n,4,h//8,w//8
else:
x = None
image_input = batch['input_image'].permute(0, 3, 1, 2)
elevation_input = batch['input_elevation'][:, 0] # b
x_input = self.encode_first_stage(image_input)
input_info = {'image': image_input, 'elevation': elevation_input, 'x': x_input}
with torch.no_grad():
clip_embed = self.clip_image_encoder.encode(image_input)
return x, clip_embed, input_info
def embed_time(self, t):
t_embed = timestep_embedding(t, self.time_embed_dim, repeat_only=False) # B,TED
t_embed = self.time_embed(t_embed) # B,TED
return t_embed
def get_target_view_feats(self, x_input, spatial_volume, clip_embed, t_embed, v_embed, target_index):
"""
@param x_input: B,4,H,W
@param spatial_volume: B,C,V,V,V
@param clip_embed: B,1,768
@param t_embed: B,t_dim
@param v_embed: B,N,v_dim
@param target_index: B,TN
@return:
tensors of size B*TN,*
"""
B, _, H, W = x_input.shape
frustum_volume_feats, frustum_volume_depth = self.spatial_volume.construct_view_frustum_volume(spatial_volume, t_embed, v_embed, self.poses, self.Ks, target_index)
# clip
TN = target_index.shape[1]
v_embed_ = v_embed[torch.arange(B)[:,None], target_index].view(B*TN, self.viewpoint_dim) # B*TN,v_dim
clip_embed_ = clip_embed.unsqueeze(1).repeat(1,TN,1,1).view(B*TN,1,768)
clip_embed_ = self.cc_projection(torch.cat([clip_embed_, v_embed_.unsqueeze(1)], -1)) # B*TN,1,768
x_input_ = x_input.unsqueeze(1).repeat(1, TN, 1, 1, 1).view(B * TN, 4, H, W)
x_concat = x_input_
return clip_embed_, frustum_volume_feats, x_concat
def training_step(self, batch):
B = batch['target_image'].shape[0]
time_steps = torch.randint(0, self.num_timesteps, (B,), device=self.device).long()
x, clip_embed, input_info = self.prepare(batch)
x_noisy, noise = self.add_noise(x, time_steps) # B,N,4,H,W
N = self.view_num
target_index = torch.randint(0, N, (B, 1), device=self.device).long() # B, 1
v_embed = self.get_viewpoint_embedding(B, input_info['elevation']) # N,v_dim
t_embed = self.embed_time(time_steps)
spatial_volume = self.spatial_volume.construct_spatial_volume(x_noisy, t_embed, v_embed, self.poses, self.Ks)
clip_embed, volume_feats, x_concat = self.get_target_view_feats(input_info['x'], spatial_volume, clip_embed, t_embed, v_embed, target_index)
x_noisy_ = x_noisy[torch.arange(B)[:,None],target_index][:,0] # B,4,H,W
noise_predict = self.model(x_noisy_, time_steps, clip_embed, volume_feats, x_concat, is_train=True) # B,4,H,W
noise_target = noise[torch.arange(B)[:,None],target_index][:,0] # B,4,H,W
# loss simple for diffusion
loss_simple = torch.nn.functional.mse_loss(noise_target, noise_predict, reduction='none')
loss = loss_simple.mean()
self.log('sim', loss_simple.mean(), prog_bar=True, logger=True, on_step=True, on_epoch=True, rank_zero_only=True)
# log others
lr = self.optimizers().param_groups[0]['lr']
self.log('lr', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False, rank_zero_only=True)
self.log("step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, rank_zero_only=True)
return loss
def add_noise(self, x_start, t):
"""
@param x_start: B,*
@param t: B,
@return:
"""
B = x_start.shape[0]
noise = torch.randn_like(x_start) # B,*
sqrt_alphas_cumprod_ = self.sqrt_alphas_cumprod[t] # B,
sqrt_one_minus_alphas_cumprod_ = self.sqrt_one_minus_alphas_cumprod[t] # B
sqrt_alphas_cumprod_ = sqrt_alphas_cumprod_.view(B, *[1 for _ in range(len(x_start.shape)-1)])
sqrt_one_minus_alphas_cumprod_ = sqrt_one_minus_alphas_cumprod_.view(B, *[1 for _ in range(len(x_start.shape)-1)])
x_noisy = sqrt_alphas_cumprod_ * x_start + sqrt_one_minus_alphas_cumprod_ * noise
return x_noisy, noise
def sample(self, sampler, batch, cfg_scale, return_inter_results=False, inter_interval=50, inter_view_interval=2):
_, clip_embed, input_info = self.prepare(batch)
x_sample, inter = sampler.sample(input_info, clip_embed, unconditional_scale=cfg_scale, log_every_t=inter_interval)
N = x_sample.shape[1]
x_sample = torch.stack([self.decode_first_stage(x_sample[:, ni]) for ni in range(N)], 1)
if return_inter_results:
torch.cuda.synchronize()
torch.cuda.empty_cache()
            inter = torch.stack(inter['x_inter'], 2) # B,N,T,C,H,W
B,N,T,C,H,W = inter.shape
inter_results = []
for ni in tqdm(range(0, N, inter_view_interval)):
inter_results_ = []
for ti in range(T):
inter_results_.append(self.decode_first_stage(inter[:, ni, ti]))
inter_results.append(torch.stack(inter_results_, 1)) # B,T,3,H,W
inter_results = torch.stack(inter_results,1) # B,N,T,3,H,W
return x_sample, inter_results
else:
return x_sample
def log_image(self, x_sample, batch, step, output_dir):
process = lambda x: ((torch.clip(x, min=-1, max=1).cpu().numpy() * 0.5 + 0.5) * 255).astype(np.uint8)
B = x_sample.shape[0]
N = x_sample.shape[1]
image_cond = []
for bi in range(B):
|
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def disable_training_module(module: nn.Module):
module = module.eval()
module.train = disabled_train
for para in module.parameters():
para.requires_grad = False
return module
def repeat_to_batch(tensor, B, VN):
t_shape = tensor.shape
ones = [1 for _ in range(len(t_shape)-1)]
tensor_new = tensor.view(B,1,*t_shape[1:]).repeat(1,VN,*ones).view(B*VN,*t_shape[1:])
return tensor_new
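# Illustrative note (not part of the original file): repeat_to_batch interleaves VN
# copies of every batch element, e.g. a tensor of shape (2, 5) with VN=3 becomes
# (2, 1, 5) -> (2, 3, 5) -> (6, 5), ordered [a, a, a, b, b, b] rather than
# [a, b, a, b, a, b]; this matches the B*VN flattening used elsewhere in this file.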
class UNetWrapper(nn.Module):
def __init__(self, diff_model_config, drop_conditions=False, drop_scheme='default', use_zero_123=True):
super().__init__()
self.diffusion_model = instantiate_from_config(diff_model_config)
self.drop_conditions = drop_conditions
self.drop_scheme=drop_scheme
self.use_zero_123 = use_zero_123
def drop(self, cond, mask):
shape = cond.shape
B = shape[0]
cond = mask.view(B,*[1 for _ in range(len(shape)-1)]) * cond
return cond
def get_trainable_parameters(self):
return self.diffusion_model.get_trainable_parameters()
def get_drop_scheme(self, B, device):
if self.drop_scheme=='default':
random = torch.rand(B, dtype=torch.float32, device=device)
drop_clip = (random > 0.15) & (random <= 0.2)
drop_volume = (random > 0.1) & (random <= 0.15)
drop_concat = (random > 0.05) & (random <= 0.1)
drop_all = random <= 0.05
else:
raise NotImplementedError
return drop_clip, drop_volume, drop_concat, drop_all
def forward(self, x, t, clip_embed, volume_feats, x_concat, is_train=False):
"""
@param x: B,4,H,W
@param t: B,
@param clip_embed: B,M,768
@param volume_feats: B,C,D,H,W
@param x_concat: B,C,H,W
@param is_train:
@return:
"""
if self.drop_conditions and is_train:
B = x.shape[0]
drop_clip, drop_volume, drop_concat, drop_all = self.get_drop_scheme(B, x.device)
clip_mask = 1.0 - (drop_clip | drop_all).float()
clip_embed = self.drop(clip_embed, clip_mask)
volume_mask = 1.0 - (drop_volume | drop_all).float()
for k, v in volume_feats.items():
volume_feats[k] = self.drop(v, mask=volume_mask)
concat_mask = 1.0 - (drop_concat | drop_all).float()
x_concat = self.drop(x_concat, concat_mask)
if self.use_zero_123:
            # zero123 does not multiply this when encoding, possibly a bug in zero123
first_stage_scale_factor = 0.18215
x_concat_ = x_concat * 1.0
x_concat_[:, :4] = x_concat_[:, :4] / first_stage_scale_factor
else:
x_concat_ = x_concat
x = torch.cat([x, x_concat_], 1)
pred = self.diffusion_model(x, t, clip_embed, source_dict=volume_feats)
return pred
def predict_with_unconditional_scale(self, x, t, clip_embed, volume_feats, x_concat, unconditional_scale):
x_ = torch.cat([x] * 2, 0)
t_ = torch.cat([t] * 2, 0)
clip_embed_ = torch.cat([clip_embed, torch.zeros_like(clip_embed)], 0)
v_ = {}
for k, v in volume_feats.items():
v_[k] = torch.cat([v, torch.zeros_like(v)], 0)
x_concat_ = torch.cat([x_concat, torch.zeros_like(x_concat)], 0)
if self.use_zero_123:
            # zero123 does not multiply this when encoding, possibly a bug in zero123
first_stage_scale_factor = 0.18215
x_concat_[:, :4] = x_concat_[:, :4] / first_stage_scale_factor
x_ = torch.cat([x_, x_concat_], 1)
s, s_uc = self.diffusion_model(x_, t_, clip_embed_, source_dict=v_).chunk(2)
s = s_uc + unconditional_scale * (s - s_uc)
return s
def predict_with_decomposed_unconditional_scales(self, x, t, clip_embed, volume_feats, x_concat, unconditional_scales):
x_ = torch.cat([x] * 3, 0)
t_ = torch.cat([t] * 3, 0)
clip_embed_ = torch.cat([clip_embed, torch.zeros_like(clip_embed), clip_embed], 0)
x_concat_ = torch.cat([x_concat, torch.zeros_like(x_concat), x_concat*4], 0)
v_ = {}
for k, v in volume_feats.items():
v_[k] = torch.cat([v, v, torch.zeros_like(v)], 0)
if self.use_zero_123:
            # zero123 does not multiply this when encoding, possibly a bug in zero123
first_stage_scale_factor = 0.18215
x_concat_[:, :4] = x_concat_[:, :4] / first_stage_scale_factor
x_ = torch.cat([x_, x_concat_], 1)
s, s_uc1, s_uc2 = self.diffusion_model(x_, t_, clip_embed_, source_dict=v_).chunk(3)
s = s + unconditional_scales[0] * (s - s_uc1) + unconditional_scales[1] * (s - s_uc2)
return s
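# --- Guidance summary (illustrative, not part of the original file) ---
# predict_with_unconditional_scale implements standard classifier-free guidance,
#   s = s_uc + w * (s_cond - s_uc),
# while predict_with_decomposed_unconditional_scales combines two separate negative
# branches: s_uc1 drops the CLIP embedding and the concatenated input latent (keeping
# the volume), and s_uc2 drops the volume features (keeping CLIP, with the concatenated
# latent scaled by 4). The outputs are merged as
#   s = s_cond + w1 * (s_cond - s_uc1) + w2 * (s_cond - s_uc2),
# with (w1, w2) supplied through `unconditional_scales`.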
class SpatialVolumeNet(nn.Module):
def __init__(self, time_dim, view_dim, view_num,
input_image_size=256, frustum_volume_depth=48,
spatial_volume_size=32, spatial_volume_length=0.5,
frustum_volume_length=0.86603 # sqrt(3)/2
):
super().__init__()
self.target_encoder = NoisyTargetViewEncoder(time_dim, view_dim, output_dim=16)
self.spatial_volume_feats = SpatialTime3DNet(input_dim=16 * view_num, time_dim=time_dim, dims=(64, 128, 256, 512))
self.frustum_volume_feats = FrustumTV3DNet(64, time_dim, view_dim, dims=(64, 128, 256, 512))
self.frustum_volume_length = frustum_volume_length
self.input_image_size = input_image_size
self.spatial_volume_size = spatial_volume_size
self.spatial_volume_length = spatial_volume_length
self.frustum_volume_size = self.input_image_size // 8
self.frustum_volume_depth = frustum_volume_depth
self.time_dim = time_dim
self.view_dim = view_dim
        self.default_origin_depth = 1.5 # our rendered images are captured 1.5 away from the origin, so we assume the camera is 1.5 away from the origin
def construct_spatial_volume(self, x, t_embed, v_embed, target_poses, target_Ks):
"""
@param x: B,N,4,H,W
@param t_embed: B,t_dim
@param v_embed: B,N,v_dim
@param target_poses: N,3,4
@param target_Ks: N,3,3
@return:
"""
B, N, _, H, W = x.shape
V = self.spatial_volume_size
device = x.device
spatial_volume_verts = torch.linspace(-self.spatial_volume_length, self.spatial_volume_length, V, dtype=torch.float32, device=device)
spatial_volume_verts = torch.stack(torch.meshgrid(spatial_volume_verts, spatial_volume_verts, spatial_volume_verts, indexing='ij'), -1)
spatial_volume_verts = spatial_volume_verts.reshape(1, V ** 3, 3)[:, :, (2, 1, 0)]
spatial_volume_verts = spatial_volume_verts.view(1, V, V, V, 3).permute(0, 4, 1, 2, 3).repeat(B, 1, 1, 1, 1)
# encode source features
t_embed_ = t_embed.view(B, 1, self.time_dim).repeat(1, N, 1).view(B, N, self.time_dim)
v_embed_ = v_embed
target_Ks = target_Ks.unsqueeze(0).repeat(B, 1, 1, 1)
target_poses = target_poses.unsqueeze(0).repeat(B, 1, 1, 1)
# extract 2D image features
spatial_volume_feats = []
# project source features
for ni in range(0, N):
pose_source_ = target_poses[:, ni]
K_source_ = target_Ks[:, ni]
x_ = self.target_encoder(x[:, ni], t_embed_[:, ni], v_embed_[:, ni])
C = x_.shape[1]
coords_source = get_warp_coordinates(spatial_volume_verts, x_.shape[-1], self.input_image_size, K_source_, pose_source_).view(B, V, V * V, 2)
unproj_feats_ = F.grid_sample(x_, coords_source, mode='bilinear', padding_mode='zeros', align_corners=True)
unproj_feats_ = unproj_feats_.view(B, C, V, V, V)
spatial_volume_feats.append(unproj_feats_)
spatial_volume_feats = torch.stack(spatial_volume_feats, 1) # B,N,C,V,V,V
N = spatial_volume_feats.shape[1]
spatial_volume_feats = spatial_volume_feats.view(B, N*C, V, V, V)
spatial_volume_feats = self.spatial_volume_feats(spatial_volume_feats, t_embed) # b,64,32,32,32
return spatial_volume_feats
def construct_view_frustum_volume(self, spatial_volume, t_embed, v_embed, poses, Ks, target_indices):
"""
@param spatial_volume: B,C,V,V,V
@param t_embed: B,t_dim
@param v_embed: B,N,v_dim
@param poses: N,3,4
@param Ks: N,3,3
@param target_indices: B,TN
@return: B*TN,C,H,W
"""
B, TN = target_indices.shape
H, W = self.frustum_volume_size, self.frustum_volume_size
D = self.frustum_volume_depth
V = self.spatial_volume_size
near = torch.ones(B * TN, 1, H, W, dtype=spatial_volume.dtype, device=spatial_volume.device) * self.default_origin_depth - self.frustum_volume_length
far = torch.ones(B * TN, 1, H, W, dtype=spatial_volume.dtype, device=spatial_volume.device) * self.default_origin_depth + self.frustum_volume_length
target_indices = target_indices.view(B*TN) # B*TN
poses_ = poses[target_indices] # B*TN,3,4
Ks_ = Ks[target_indices] # B*TN,3,4
volume_xyz, volume_depth = create_target_volume(D, self.frustum_volume_size, self.input_image_size, poses_, Ks_, near, far) # B*TN,3 or 1,D,H,W
volume_xyz_ = volume_xyz / self.spatial_volume_length # since the spatial volume is constructed in [-spatial_volume_length,spatial_volume_length]
volume_xyz_ = volume_xyz_.permute(0, 2, 3, 4, 1) # B*TN,D,H,W,3
spatial_volume_ = spatial_volume.unsqueeze(1).repeat(1, TN, 1, 1, 1, 1).view(B * TN, -1, V, V, V)
volume_feats = F.grid_sample(spatial_volume_, volume_xyz_, mode='bilinear', padding_mode='zeros', align_corners=True) # B*TN,C,D,H,W
v_embed_ = v_embed[torch.arange(B)[:,None], target_indices.view(B,TN)].view(B*TN, -1) # B*TN
t_embed_ = t_embed.unsqueeze(1).repeat(1,TN,1).view(B*TN,-1)
volume_feats_dict = self.frustum_volume_feats(volume_feats, t_embed_, v_embed_)
return volume_feats_dict, volume_depth
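# --- Shape walk-through (illustrative, not part of the original file) ---
# With the defaults (view_num N=16, spatial_volume_size V=32, frustum size 32x32,
# frustum depth D=48): construct_spatial_volume unprojects each of the N noisy target
# latents into a (B, 16, V, V, V) grid, concatenates them to (B, 16*N, V, V, V) and
# runs the 3D network to obtain a shared volume of shape (B, 64, 32, 32, 32);
# construct_view_frustum_volume then resamples that volume along every target view's
# frustum with F.grid_sample, producing (B*TN, 64, 48, 32, 32) features that are fed
# to FrustumTV3DNet, which returns a multi-resolution dict plus the sampled depths.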
class SyncMultiviewDiffusion(pl.LightningModule):
def __init__(self, unet_config, scheduler_config,
finetune_unet=False, finetune_projection=True,
view_num=16, image_size=256,
cfg_scale=3.0, output_num=8, batch_view_num=4,
drop_conditions=False, drop_scheme='default',
clip_image_encoder_path="/apdcephfs/private_rondyliu/projects/clip/ViT-L-14.pt",
sample_type='ddim', sample_steps=200):
super().__init__()
self.finetune_unet = finetune_unet
self.finetune_projection = finetune_projection
self.view_num = view_num
self.viewpoint_dim = 4
self.output_num = output_num
self.image_size = image_size
self.batch_view_num = batch_view_num
self.cfg_scale = cfg_scale
self.clip_image_encoder_path = clip_image_encoder_path
self._init_time_step_embedding()
self._init_first_stage()
self._init_schedule()
self._init_multiview()
self._init_clip_image_encoder()
self._init_clip_projection()
self.spatial_volume = SpatialVolumeNet(self.time_embed_dim, self.viewpoint_dim, self.view_num)
self.model = UNetWrapper(unet_config, drop_conditions=drop_conditions, drop_scheme=drop_scheme)
self.scheduler_config = scheduler_config
latent_size = image_size//8
if sample_type=='ddim':
self.sampler = SyncDDIMSampler(self, sample_steps , "uniform", 1.0, latent_size=latent_size)
else:
raise NotImplementedError
def _init_clip_projection(self):
self.cc_projection = nn.Linear(772, 768)
nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
nn.init.zeros_(list(self.cc_projection.parameters())[1])
self.cc_projection.requires_grad_(True)
if not self.finetune_projection:
disable_training_module(self.cc_projection)
def _init_multiview(self):
K, azs, _, _, poses = read_pickle(f'meta_info/camera-{self.view_num}.pkl')
default_image_size = 256
ratio = self.image_size/default_image_size
K = np.diag([ratio,ratio,1]) @ K
K = torch.from_numpy(K.astype(np.float32)) # [3,3]
K = K.unsqueeze(0).repeat(self.view_num,1,1) # N,3,3
poses = torch.from_numpy(poses.astype(np.float32)) # N,3,4
self.register_buffer('poses', poses)
self.register_buffer('Ks', K)
        azs = (azs + np.pi) % (np.pi * 2) - np.pi # wrap to [-pi,pi] so that index=0 has az=0
self.register_buffer('azimuth', torch.from_numpy(azs.astype(np.float32)))
def get_viewpoint_embedding(self, batch_size, elevation_ref):
"""
@param batch_size:
@param elevation_ref: B
@return:
"""
azimuth_input = self.azimuth[0].unsqueeze(0) # 1
azimuth_target = self.azimuth # N
        elevation_input = -elevation_ref # note that zero123 uses a negative elevation here!!!
elevation_target = -np.deg2rad(30)
d_e = elevation_target - elevation_input # B
N = self.azimuth.shape[0]
B = batch_size
d_e = d_e.unsqueeze(1).repeat(1, N)
d_a = azimuth_target - azimuth_input # N
d_a = d_a.unsqueeze(0).repeat(B, 1)
d_z = torch.zeros_like(d_a)
embedding = torch.stack([d_e, torch.sin(d_a), torch.cos(d_a), d_z], -1) # B,N,4
return embedding
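    # Worked example (illustrative, not part of the original file): with the input view
    # at azimuth 0 and elevation 30 deg (elevation_ref = pi/6), the fixed target elevation
    # of 30 deg gives d_e = 0, so target view k is embedded as (0, sin(d_a_k), cos(d_a_k), 0)
    # where d_a_k is its azimuth relative to the input view; the embedding therefore encodes
    # only *relative* camera motion, following the (negated-elevation) zero123 convention.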
def _init_first_stage(self):
first_stage_config={
"target": "ldm.models.autoencoder.AutoencoderKL",
"params": {
"embed_dim": 4,
"monitor": "val/rec_loss",
"ddconfig":{
"double_z": True,
"z_channels": 4,
"resolution": self.image_size,
"in_channels": 3,
"out_ch": 3,
"ch": 128,
"ch_mult": [1,2,4,4],
"num_res_blocks": 2,
"attn_resolutions": [],
"dropout": 0.0
},
"lossconfig": {"target": "torch.nn.Identity"},
}
}
self.first_stage_scale_factor = 0.18215
self.first_stage_model = instantiate_from_config(first_stage_config)
self.first_stage_model = disable_training_module(self.first_stage_model)
def _init_clip_image_encoder(self):
self.clip_image_encoder = FrozenCLIPImageEmbedder(model=self.clip_image_encoder_path)
self.clip_image_encoder = disable_training_module(self.clip_image_encoder)
def _init_schedule(self):
self.num_timesteps = 1000
linear_start = 0.00085
linear_end = 0.0120
num_timesteps = 1000
betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, num_timesteps, dtype=torch.float32) ** 2 # T
assert betas.shape[0] == self.num_timesteps
# all in float64 first
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0) # T
alphas_cumprod_prev = torch.cat([torch.ones(1, dtype=torch.float64), alphas_cumprod[:-1]], 0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) # T
posterior_log_variance_clipped = torch.log(torch.clamp(posterior_variance, min=1e-20))
posterior_log_variance_clipped = torch.clamp(posterior_log_variance_clipped, min=-10)
self.register_buffer("betas", betas.float())
self.register_buffer("alphas", alphas.float())
self.register_buffer("alphas_cumprod", alphas_cumprod.float())
self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod).float())
self.register_buffer("sqrt_one_minus_alphas_cumprod", torch.sqrt(1 - alphas_cumprod).float())
self.register_buffer("posterior_variance", posterior_variance.float())
self.register_buffer('posterior_log_variance_clipped', posterior_log_variance_clipped.float())
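    # Schedule summary (illustrative, not part of the original file):
    #   beta_t is linear in sqrt-space between 0.00085 and 0.0120 over T=1000 steps,
    #   alpha_t = 1 - beta_t, alpha_bar_t = prod_{s<=t} alpha_s, and the posterior
    #   variance is beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
    # i.e. the "scaled linear" schedule used by Stable Diffusion / zero123.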
def _init_time_step_embedding(self):
self.time_embed_dim = 256
self.time_embed = nn.Sequential(
nn.Linear(self.time_embed_dim, self.time_embed_dim),
nn.SiLU(True),
nn.Linear(self.time_embed_dim, self.time_embed_dim),
)
def encode_first_stage(self, x, sample=True):
with torch.no_grad():
posterior = self.first_stage_model.encode(x) # b,4,h//8,w//8
if sample:
return posterior.sample().detach() * self.first_stage_scale_factor
else:
return posterior.mode().detach() * self.first_stage_scale_factor
def decode_first_stage(self, z):
with torch.no_grad():
z = 1. / self.first_stage_scale_factor * z
return self.first_stage_model.decode(z)
def prepare(self, batch):
# encode target
if 'target_image' in batch:
image_target = batch['target_image'].permute(0, 1, 4, 2, 3) # b,n,3,h,w
N = image_target.shape[1]
x = [self.encode_first_stage(image_target[:,ni], True) for ni in range(N)]
x = torch.stack(x, 1) # b,n,4,h//8,w//8
else:
x = None
image_input = batch['input_image'].permute(0, 3, 1, 2)
elevation_input = batch['input_elevation'][:, 0] # b
x_input = self.encode_first_stage(image_input)
input_info = {'image': image_input, 'elevation': elevation_input, 'x': x_input}
with torch.no_grad():
clip_embed = self.clip_image_encoder.encode(image_input)
return x, clip_embed, input_info
def embed_time(self, t):
t_embed = timestep_embedding(t, self.time_embed_dim, repeat_only=False) # B,TED
t_embed = self.time_embed(t_embed) # B,TED
return t_embed
def get_target_view_feats(self, x_input, spatial_volume, clip_embed, t_embed, v_embed, target_index):
"""
@param x_input: B,4,H,W
@param spatial_volume: B,C,V,V,V
@param clip_embed: B,1,768
@param t_embed: B,t_dim
@param v_embed: B,N,v_dim
@param target_index: B,TN
@return:
tensors of size B*TN,*
"""
B, _, H, W = x_input.shape
frustum_volume_feats, frustum_volume_depth = self.spatial_volume.construct_view_frustum_volume(spatial_volume, t_embed, v_embed, self.poses, self.Ks, target_index)
# clip
TN = target_index.shape[1]
v_embed_ = v_embed[torch.arange(B)[:,None], target_index].view(B*TN, self.viewpoint_dim) # B*TN,v_dim
clip_embed_ = clip_embed.unsqueeze(1).repeat(1,TN,1,1).view(B*TN,1,768)
clip_embed_ = self.cc_projection(torch.cat([clip_embed_, v_embed_.unsqueeze(1)], -1)) # B*TN,1,768
x_input_ = x_input.unsqueeze(1).repeat(1, TN, 1, 1, 1).view(B * TN, 4, H, W)
x_concat = x_input_
return clip_embed_, frustum_volume_feats, x_concat
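# cc_projection above fuses the global CLIP image embedding with the per-target-view
# relative camera embedding (Zero123-style conditioning): each concatenated
# [768 + viewpoint_dim] vector is projected back to a single 768-d conditioning token,
# giving one such token per (batch, target view) pair, i.e. B*TN tokens of shape 1x768.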
def training_step(self, batch):
B = batch['target_image'].shape[0]
time_steps = torch.randint(0, self.num_timesteps, (B,), device=self.device).long()
x, clip_embed, input_info = self.prepare(batch)
x_noisy, noise = self.add_noise(x, time_steps) # B,N,4,H,W
N = self.view_num
target_index = torch.randint(0, N, (B, 1), device=self.device).long() # B, 1
v_embed = self.get_viewpoint_embedding(B, input_info['elevation']) # N,v_dim
t_embed = self.embed_time(time_steps)
spatial_volume = self.spatial_volume.construct_spatial_volume(x_noisy, t_embed, v_embed, self.poses, self.Ks)
clip_embed, volume_feats, x_concat = self.get_target_view_feats(input_info['x'], spatial_volume, clip_embed, t_embed, v_embed, target_index)
x_noisy_ = x_noisy[torch.arange(B)[:,None],target_index][:,0] # B,4,H,W
noise_predict = self.model(x_noisy_, time_steps, clip_embed, volume_feats, x_concat, is_train=True) # B,4,H,W
noise_target = noise[torch.arange(B)[:,None],target_index][:,0] # B,4,H,W
# loss simple for diffusion
loss_simple = torch.nn.functional.mse_loss(noise_target, noise_predict, reduction='none')
loss = loss_simple.mean()
self.log('sim', loss_simple.mean(), prog_bar=True, logger=True, on_step=True, on_epoch=True, rank_zero_only=True)
# log others
lr = self.optimizers().param_groups[0]['lr']
self.log('lr', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False, rank_zero_only=True)
self.log("step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, rank_zero_only=True)
return loss
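# The scalar optimized above is the standard "simple" epsilon-prediction diffusion loss,
# L_simple = E_{x_0, t, eps} || eps - eps_theta(x_t, t, cond) ||^2,
# evaluated on a single randomly chosen target view per batch element (target_index)
# rather than on all N views at once.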
def add_noise(self, x_start, t):
"""
@param x_start: B,*
@param t: B,
@return: (x_noisy, noise), both shaped like x_start
"""
B = x_start.shape[0]
noise = torch.randn_like(x_start) # B,*
sqrt_alphas_cumprod_ = self.sqrt_alphas_cumprod[t] # B,
sqrt_one_minus_alphas_cumprod_ = self.sqrt_one_minus_alphas_cumprod[t] # B
sqrt_alphas_cumprod_ = sqrt_alphas_cumprod_.view(B, *[1 for _ in range(len(x_start.shape)-1)])
sqrt_one_minus_alphas_cumprod_ = sqrt_one_minus_alphas_cumprod_.view(B, *[1 for _ in range(len(x_start.shape)-1)])
x_noisy = sqrt_alphas_cumprod_ * x_start + sqrt_one_minus_alphas_cumprod_ * noise
return x_noisy, noise
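# add_noise() is the closed-form forward process
# x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * eps, eps ~ N(0, I),
# with the per-sample coefficients broadcast over the trailing dims of x_start.
# Minimal usage sketch (hypothetical shapes, assuming `model` is an initialized instance):
# x0 = torch.randn(2, 16, 4, 32, 32, device=model.device) # B,N,4,h,w latents
# t = torch.randint(0, model.num_timesteps, (2,), device=model.device).long()
# x_t, eps = model.add_noise(x0, t) # both returned tensors are shaped like x0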
def sample(self, sampler, batch, cfg_scale, return_inter_results=False, inter_interval=50, inter_view_interval=2):
_, clip_embed, input_info = self.prepare(batch)
x_sample, inter = sampler.sample(input_info, clip_embed, unconditional_scale=cfg_scale, log_every_t=inter_interval)
N = x_sample.shape[1]
x_sample = torch.stack([self.decode_first_stage(x_sample[:, ni]) for ni in range(N)], 1)
if return_inter_results:
torch.cuda.synchronize()
torch.cuda.empty_cache()
inter = torch.stack(inter['x_inter'], 2) # B,N,T,C,H,W
B,N,T,C,H,W = inter.shape
inter_results = []
for ni in tqdm(range(0, N, inter_view_interval)):
inter_results_ = []
for ti in range(T):
inter_results_.append(self.decode_first_stage(inter[:, ni, ti]))
inter_results.append(torch.stack(inter_results_, 1)) # B,T,3,H,W
inter_results = torch.stack(inter_results,1) # B,N,T,3,H,W
return x_sample, inter_results
else:
return x_sample
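# `cfg_scale` is passed straight through to the external sampler as the classifier-free
# guidance weight; the combination the sampler is expected to apply (implemented there,
# not in this class) is the usual eps = eps_uncond + cfg_scale * (eps_cond - eps_uncond).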
def log_image(self, x_sample, batch, step, output_dir):
process = lambda x: ((torch.clip(x, min=-1, max=1).cpu().numpy() * 0.5 + 0.5) * 255).astype(np.uint8)
B = x_sample.shape[0]
N = x_sample.shape[1]
image_cond = []
for bi in range(B): | img_pr_ = concat_images_list(process(batch['input_image'][bi]),*[process(x_sample[bi, ni].permute(1, 2, 0)) for ni in range(N)]) | 1 | 2023-12-21 04:44:00+00:00 | 12k |
OPPOMKLab/u-LLaVA | models/segment_anything/automatic_mask_generator.py | [
{
"identifier": "Sam",
"path": "models/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n dtype = masks.dtype\n\n masks = F.interpolate(\n masks.float(),\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n # masks = masks.to(dtype)\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "SamPredictor",
"path": "models/segment_anything/predictor.py",
"snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None"
},
{
"identifier": "MaskData",
"path": "models/segment_anything/utils/amg.py",
"snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()"
},
{
"identifier": "area_from_rle",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])"
},
{
"identifier": "batch_iterator",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]"
},
{
"identifier": "batched_mask_to_box",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh"
},
{
"identifier": "build_all_layer_point_grids",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer"
},
{
"identifier": "calculate_stability_score",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
},
{
"identifier": "coco_encode_rle",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle"
},
{
"identifier": "generate_crop_boxes",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs"
},
{
"identifier": "is_box_near_crop_edge",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out"
},
{
"identifier": "remove_small_regions",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True"
},
{
"identifier": "rle_to_mask",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset"
},
{
"identifier": "uncrop_masks",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)"
},
{
"identifier": "uncrop_points",
"path": "models/segment_anything/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset"
}
] | from typing import Any, Dict, List, Optional, Tuple
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (MaskData, area_from_rle, batch_iterator,
batched_mask_to_box, box_xyxy_to_xywh,
build_all_layer_point_grids, calculate_stability_score,
coco_encode_rle, generate_crop_boxes,
is_box_near_crop_edge, mask_to_rle_pytorch,
remove_small_regions, rle_to_mask, uncrop_boxes_xyxy,
uncrop_masks, uncrop_points)
from pycocotools import \
mask as mask_utils # type: ignore # noqa: F401
import numpy as np
import torch
import cv2 # type: ignore # noqa: F401 | 10,538 | self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [
coco_encode_rle(rle) for rle in mask_data["rles"]
]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]),
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
"predicted_iou": mask_data["iou_preds"][idx].item(),
"point_coords": [mask_data["points"][idx].tolist()],
"stability_score": mask_data["stability_score"][idx].item(),
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
}
curr_anns.append(ann)
return curr_anns
def _generate_masks(self, image: np.ndarray) -> MaskData:
orig_size = image.shape[:2]
crop_boxes, layer_idxs = generate_crop_boxes(
orig_size, self.crop_n_layers, self.crop_overlap_ratio
)
# Iterate over image crops
data = MaskData()
for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
data.cat(crop_data)
# Remove duplicate masks between crops
if len(crop_boxes) > 1:
# Prefer masks from smaller crops
scores = 1 / box_area(data["crop_boxes"])
scores = scores.to(data["boxes"].device)
keep_by_nms = batched_nms(
data["boxes"].float(),
scores,
torch.zeros_like(data["boxes"][:, 0]), # categories
iou_threshold=self.crop_nms_thresh,
)
data.filter(keep_by_nms)
data.to_numpy()
return data
def _process_crop(
self,
image: np.ndarray,
crop_box: List[int],
crop_layer_idx: int,
orig_size: Tuple[int, ...],
) -> MaskData:
# Crop the image and calculate embeddings
x0, y0, x1, y1 = crop_box
cropped_im = image[y0:y1, x0:x1, :]
cropped_im_size = cropped_im.shape[:2]
self.predictor.set_image(cropped_im)
# Get points for this crop
points_scale = np.array(cropped_im_size)[None, ::-1]
points_for_image = self.point_grids[crop_layer_idx] * points_scale
# Generate masks for this crop in batches
data = MaskData()
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [
coco_encode_rle(rle) for rle in mask_data["rles"]
]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]),
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
"predicted_iou": mask_data["iou_preds"][idx].item(),
"point_coords": [mask_data["points"][idx].tolist()],
"stability_score": mask_data["stability_score"][idx].item(),
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
}
curr_anns.append(ann)
return curr_anns
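# Typical end-to-end usage (a minimal sketch; it assumes this package exposes the
# upstream `sam_model_registry` builders and that a ViT-H checkpoint is available,
# neither of which is shown in this file):
# from models.segment_anything import sam_model_registry
# sam = sam_model_registry["vit_h"](checkpoint="ckpt/sam_vit_h_4b8939.pth").to("cuda")
# generator = SamAutomaticMaskGenerator(sam, points_per_side=32, output_mode="binary_mask")
# image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB) # HWC uint8, RGB
# anns = generator.generate(image) # list of dicts: 'segmentation', 'bbox', 'area', ...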
def _generate_masks(self, image: np.ndarray) -> MaskData:
orig_size = image.shape[:2]
crop_boxes, layer_idxs = generate_crop_boxes(
orig_size, self.crop_n_layers, self.crop_overlap_ratio
)
# Iterate over image crops
data = MaskData()
for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
data.cat(crop_data)
# Remove duplicate masks between crops
if len(crop_boxes) > 1:
# Prefer masks from smaller crops
scores = 1 / box_area(data["crop_boxes"])
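# (scoring by 1 / area makes boxes that came from smaller, higher-resolution crops win
# the NMS when near-duplicate masks from different crops overlap)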
scores = scores.to(data["boxes"].device)
keep_by_nms = batched_nms(
data["boxes"].float(),
scores,
torch.zeros_like(data["boxes"][:, 0]), # categories
iou_threshold=self.crop_nms_thresh,
)
data.filter(keep_by_nms)
data.to_numpy()
return data
def _process_crop(
self,
image: np.ndarray,
crop_box: List[int],
crop_layer_idx: int,
orig_size: Tuple[int, ...],
) -> MaskData:
# Crop the image and calculate embeddings
x0, y0, x1, y1 = crop_box
cropped_im = image[y0:y1, x0:x1, :]
cropped_im_size = cropped_im.shape[:2]
self.predictor.set_image(cropped_im)
# Get points for this crop
points_scale = np.array(cropped_im_size)[None, ::-1]
points_for_image = self.point_grids[crop_layer_idx] * points_scale
# Generate masks for this crop in batches
data = MaskData() | for (points,) in batch_iterator(self.points_per_batch, points_for_image): | 4 | 2023-12-21 08:10:23+00:00 | 12k |
chinhsuanwu/ifusion | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config, **kwargs):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**kwargs, **config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "VQModelInterface",
"path": "ldm/models/autoencoder.py",
"snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return 
self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
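A minimal sketch of how a schedule returned by make_beta_schedule is usually consumed (plain NumPy/PyTorch; the inlined "linear" rule mirrors the branch above, and the variable names are illustrative, not from the repo):

```python
import numpy as np
import torch

# same "linear" rule as make_beta_schedule("linear", 1000): linspace of sqrt-endpoints, then squared
betas = np.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, 1000, dtype=np.float64) ** 2

alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)  # \bar{alpha}_t, what the samplers index into
sqrt_alphas_cumprod = torch.from_numpy(np.sqrt(alphas_cumprod)).float()
sqrt_one_minus_alphas_cumprod = torch.from_numpy(np.sqrt(1.0 - alphas_cumprod)).float()
```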
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
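A small usage sketch for extract_into_tensor (shapes are illustrative): it picks one coefficient per sample at that sample's timestep and reshapes the result so it broadcasts over an image-shaped batch.

```python
import torch

a = torch.linspace(0.9999, 0.01, 1000)      # e.g. a precomputed sqrt_alphas_cumprod lookup table
t = torch.tensor([0, 499, 999])             # one timestep per sample in a batch of 3
x_shape = (3, 4, 32, 32)                    # the tensor the coefficients must broadcast against

coef = a.gather(-1, t).reshape(3, 1, 1, 1)  # what extract_into_tensor(a, t, x_shape) returns
out = coef * torch.randn(x_shape)           # per-sample scaling, broadcast over C, H, W
```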
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n # for i, step in enumerate(iterator):\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, 
i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), 
desc=\"Encoding Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec"
},
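The arithmetic at the core of p_sample_ddim above, written out as a standalone helper (eps-parameterization, per-batch scalar coefficients; the function and variable names here are mine, not part of the repo):

```python
import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t, noise=None):
    """x: current latent x_t, e_t: predicted noise, a_t / a_prev: alpha_cumprod at t and t_prev."""
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()        # current estimate of x_0
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t          # "direction pointing to x_t"
    if noise is None:
        noise = torch.randn_like(x)
    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + sigma_t * noise  # sigma_t == 0 gives deterministic DDIM
    return x_prev, pred_x0
```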
{
"identifier": "CrossAttention",
"path": "ldm/modules/attention.py",
"snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities import rank_zero_only
from omegaconf import ListConfig
from ldm.util import (
log_txt_as_img,
exists,
default,
ismap,
isimage,
mean_flat,
count_params,
instantiate_from_config,
)
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import (
normal_kl,
DiagonalGaussianDistribution,
)
from ldm.models.autoencoder import (
VQModelInterface,
IdentityFirstStage,
AutoencoderKL,
)
from ldm.modules.diffusionmodules.util import (
make_beta_schedule,
extract_into_tensor,
noise_like,
)
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.attention import CrossAttention | 10,369 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image_target",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image_target",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema: | self.model_ema = LitEma(self.model) | 8 | 2023-12-17 12:45:38+00:00 | 12k |
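Tying the pieces of the record above together, the forward-diffusion shortcut that the DDPM/DDIMSampler snippets precompute their buffers for can be written as a standalone sketch (names are illustrative; this mirrors stochastic_encode / q_sample):

```python
import torch

def q_sample_sketch(x0, t, sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod, noise=None):
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
    if noise is None:
        noise = torch.randn_like(x0)
    b = t.shape[0]
    c1 = sqrt_alphas_cumprod.gather(-1, t).reshape(b, 1, 1, 1)
    c2 = sqrt_one_minus_alphas_cumprod.gather(-1, t).reshape(b, 1, 1, 1)
    return c1 * x0 + c2 * noise
```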
wangzhecheng/SkyScript | src/training/main.py | [
{
"identifier": "create_model_and_transforms",
"path": "src/open_clip/factory.py",
"snippet": "def create_model_and_transforms(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = False,\n force_quick_gelu: bool = False,\n force_custom_text: bool = False,\n force_patch_dropout: Optional[float] = None,\n force_image_size: Optional[Union[int, Tuple[int, int]]] = None,\n pretrained_image: bool = False,\n pretrained_hf: bool = True,\n image_mean: Optional[Tuple[float, ...]] = None,\n image_std: Optional[Tuple[float, ...]] = None,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n cache_dir: Optional[str] = None,\n output_dict: Optional[bool] = None,\n):\n model = create_model(\n model_name,\n pretrained,\n precision=precision,\n device=device,\n jit=jit,\n force_quick_gelu=force_quick_gelu,\n force_custom_text=force_custom_text,\n force_patch_dropout=force_patch_dropout,\n force_image_size=force_image_size,\n pretrained_image=pretrained_image,\n pretrained_hf=pretrained_hf,\n cache_dir=cache_dir,\n output_dict=output_dict,\n )\n\n image_mean = image_mean or getattr(model.visual, 'image_mean', None)\n image_std = image_std or getattr(model.visual, 'image_std', None)\n preprocess_train = image_transform(\n model.visual.image_size,\n is_train=True,\n mean=image_mean,\n std=image_std,\n aug_cfg=aug_cfg,\n )\n preprocess_val = image_transform(\n model.visual.image_size,\n is_train=False,\n mean=image_mean,\n std=image_std,\n )\n\n return model, preprocess_train, preprocess_val"
},
{
"identifier": "get_tokenizer",
"path": "src/open_clip/factory.py",
"snippet": "def get_tokenizer(model_name):\n if model_name.startswith(HF_HUB_PREFIX):\n tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])\n else:\n config = get_model_config(model_name)\n tokenizer = HFTokenizer(\n config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize\n return tokenizer"
},
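A hedged usage sketch for the two factory functions above (import paths follow this record's layout; the model name is illustrative and no pretrained weights are assumed):

```python
import torch
from src.open_clip.factory import create_model_and_transforms, get_tokenizer

model, preprocess_train, preprocess_val = create_model_and_transforms(
    "ViT-B-32", pretrained=None, precision="fp32", device="cpu"
)
tokenizer = get_tokenizer("ViT-B-32")

texts = tokenizer(["a satellite image of a solar farm"])   # LongTensor [1, context_length]
with torch.no_grad():
    text_features = model.encode_text(texts)
    text_features = text_features / text_features.norm(dim=-1, keepdim=True)
```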
{
"identifier": "create_loss",
"path": "src/open_clip/factory.py",
"snippet": "def create_loss(args):\n if args.distill:\n return DistillClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n elif \"coca\" in args.model.lower():\n return CoCaLoss(\n caption_loss_weight=args.coca_caption_loss_weight,\n clip_loss_weight=args.coca_contrastive_loss_weight,\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n return ClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )"
},
{
"identifier": "trace_model",
"path": "src/open_clip/model.py",
"snippet": "def trace_model(model, batch_size=256, device=torch.device('cpu')):\n model.eval()\n image_size = model.visual.image_size\n example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)\n example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)\n model = torch.jit.trace_module(\n model,\n inputs=dict(\n forward=(example_images, example_text),\n encode_text=(example_text,),\n encode_image=(example_images,)\n ))\n model.visual.image_size = image_size\n return model"
},
{
"identifier": "get_data",
"path": "src/training/data.py",
"snippet": "def get_data(args, preprocess_fns, epoch=0, tokenizer=None):\n preprocess_train, preprocess_val = preprocess_fns\n data = {}\n\n if args.train_data or args.dataset_type == \"synthetic\":\n data[\"train\"] = get_dataset_fn(args.train_data, args.dataset_type)(\n args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)\n\n if args.val_data:\n data[\"val\"] = get_dataset_fn(args.val_data, args.dataset_type)(\n args, preprocess_val, is_train=False, tokenizer=tokenizer)\n\n if args.imagenet_val is not None:\n data[\"imagenet-val\"] = get_imagenet(args, preprocess_fns, \"val\")\n\n if args.imagenet_v2 is not None:\n data[\"imagenet-v2\"] = get_imagenet(args, preprocess_fns, \"v2\")\n\n return data"
},
{
"identifier": "is_master",
"path": "src/training/distributed.py",
"snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)"
},
{
"identifier": "init_distributed_device",
"path": "src/training/distributed.py",
"snippet": "def init_distributed_device(args):\n # Distributed training = training on more than one GPU.\n # Works in both single and multi-node scenarios.\n args.distributed = False\n args.world_size = 1\n args.rank = 0 # global rank\n args.local_rank = 0\n if args.horovod:\n assert hvd is not None, \"Horovod is not installed\"\n hvd.init()\n args.local_rank = int(hvd.local_rank())\n args.rank = hvd.rank()\n args.world_size = hvd.size()\n args.distributed = True\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n elif is_using_distributed():\n if 'SLURM_PROCID' in os.environ:\n # DDP via SLURM\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n # SLURM var -> torch.distributed vars in case needed\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n else:\n # DDP via torchrun, torch.distributed.launch\n args.local_rank, _, _ = world_info_from_env()\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url)\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n args.distributed = True\n\n if torch.cuda.is_available():\n if args.distributed and not args.no_set_device_rank:\n device = 'cuda:%d' % args.local_rank\n else:\n device = 'cuda:0'\n torch.cuda.set_device(device)\n else:\n device = 'cpu'\n args.device = device\n device = torch.device(device)\n return device"
},
{
"identifier": "broadcast_object",
"path": "src/training/distributed.py",
"snippet": "def broadcast_object(args, obj, src=0):\n # broadcast a pickle-able python object from rank-0 to all ranks\n if args.horovod:\n return hvd.broadcast_object(obj, root_rank=src)\n else:\n if args.rank == src:\n objects = [obj]\n else:\n objects = [None]\n dist.broadcast_object_list(objects, src=src)\n return objects[0]"
},
{
"identifier": "setup_logging",
"path": "src/training/logger.py",
"snippet": "def setup_logging(log_file, level, include_host=False):\n if include_host:\n import socket\n hostname = socket.gethostname()\n formatter = logging.Formatter(\n f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n else:\n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n\n logging.root.setLevel(level)\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n for logger in loggers:\n logger.setLevel(level)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logging.root.addHandler(stream_handler)\n\n if log_file:\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(formatter)\n logging.root.addHandler(file_handler)"
},
{
"identifier": "parse_args",
"path": "src/training/params.py",
"snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--train-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.\",\n )\n parser.add_argument(\n \"--train-data-upsampling-factors\",\n type=str,\n default=None,\n help=(\n \"When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. \"\n \"Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) \"\n \"By default, datapoints are sampled uniformly regardless of the dataset sizes.\"\n )\n )\n parser.add_argument(\n \"--val-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with validation data\",\n )\n parser.add_argument(\n \"--train-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Required for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--val-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Useful for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--dataset-type\",\n choices=[\"webdataset\", \"csv\", \"synthetic\", \"auto\"],\n default=\"auto\",\n help=\"Which type of dataset to process.\"\n )\n parser.add_argument(\n \"--dataset-resampled\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use sampling with replacement for webdataset shard selection.\"\n )\n parser.add_argument(\n \"--csv-separator\",\n type=str,\n default=\"\\t\",\n help=\"For csv-like datasets, which separator to use.\"\n )\n parser.add_argument(\n \"--csv-img-key\",\n type=str,\n default=\"filepath\",\n help=\"For csv-like datasets, the name of the key for the image paths.\"\n )\n parser.add_argument(\n \"--csv-caption-key\",\n type=str,\n default=\"title\",\n help=\"For csv-like datasets, the name of the key for the captions.\"\n )\n parser.add_argument(\n \"--imagenet-val\",\n type=str,\n default=None,\n help=\"Path to imagenet val set for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--imagenet-v2\",\n type=str,\n default=None,\n help=\"Path to imagenet v2 for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--logs\",\n type=str,\n default=\"./logs/\",\n help=\"Where to store tensorboard logs. Use None to avoid storing logs.\",\n )\n parser.add_argument(\n \"--log-local\",\n action=\"store_true\",\n default=False,\n help=\"log files on local master, otherwise global master only.\",\n )\n parser.add_argument(\n \"--name\",\n type=str,\n default=None,\n help=\"Optional identifier for the experiment when storing logs. 
Otherwise use current time.\",\n )\n parser.add_argument(\n \"--workers\", type=int, default=1, help=\"Number of dataloader workers per GPU.\"\n )\n parser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"Batch size per GPU.\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=32, help=\"Number of epochs to train for.\"\n )\n parser.add_argument(\n \"--epochs-cooldown\", type=int, default=None,\n help=\"When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards.\"\n )\n parser.add_argument(\"--lr\", type=float, default=None, help=\"Learning rate.\")\n parser.add_argument(\"--beta1\", type=float, default=None, help=\"Adam beta 1.\")\n parser.add_argument(\"--beta2\", type=float, default=None, help=\"Adam beta 2.\")\n parser.add_argument(\"--eps\", type=float, default=None, help=\"Adam epsilon.\")\n parser.add_argument(\"--wd\", type=float, default=0.2, help=\"Weight decay.\")\n parser.add_argument(\n \"--warmup\", type=int, default=10000, help=\"Number of steps to warmup for.\"\n )\n parser.add_argument(\n \"--use-bn-sync\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use batch norm sync.\")\n parser.add_argument(\n \"--skip-scheduler\",\n action=\"store_true\",\n default=False,\n help=\"Use this flag to skip the learning rate decay.\",\n )\n parser.add_argument(\n \"--lr-scheduler\",\n type=str,\n default='cosine',\n help=\"LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine\",\n )\n parser.add_argument(\n \"--lr-cooldown-end\", type=float, default=0.0,\n help=\"End learning rate for cooldown schedule. Default: 0\"\n )\n parser.add_argument(\n \"--lr-cooldown-power\", type=float, default=1.0,\n help=\"Power for polynomial cooldown schedule. 
Default: 1.0 (linear decay)\"\n )\n parser.add_argument(\n \"--save-frequency\", type=int, default=1, help=\"How often to save checkpoints.\"\n )\n parser.add_argument(\n \"--save-most-recent\",\n action=\"store_true\",\n default=False,\n help=\"Always save the most recent model trained to epoch_latest.pt.\",\n )\n parser.add_argument(\n \"--zeroshot-frequency\", type=int, default=2, help=\"How often to run zero shot.\"\n )\n parser.add_argument(\n \"--val-frequency\", type=int, default=1, help=\"How often to run evaluation with val data.\"\n )\n parser.add_argument(\n \"--resume\",\n default=None,\n type=str,\n help=\"path to latest checkpoint (default: none)\",\n )\n parser.add_argument(\n \"--precision\",\n choices=[\"amp\", \"amp_bf16\", \"amp_bfloat16\", \"bf16\", \"fp16\", \"pure_bf16\", \"pure_fp16\", \"fp32\"],\n default=\"amp\",\n help=\"Floating point precision.\"\n )\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"RN50\",\n help=\"Name of the vision backbone to use.\",\n )\n parser.add_argument(\n \"--pretrained\",\n default='',\n type=str,\n help=\"Use a pretrained CLIP model weights with the specified tag or file path.\",\n )\n parser.add_argument(\n \"--pretrained-image\",\n default=False,\n action='store_true',\n help=\"Load imagenet pretrained weights for image tower backbone if available.\",\n )\n parser.add_argument(\n \"--lock-image\",\n default=False,\n action='store_true',\n help=\"Lock full image tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-image-unlocked-groups\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-image-freeze-bn-stats\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n '--image-mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override default image mean value of dataset')\n parser.add_argument(\n '--image-std', type=float, nargs='+', default=None, metavar='STD',\n help='Override default image std deviation of of dataset')\n parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)\n parser.add_argument(\n \"--grad-checkpointing\",\n default=False,\n action='store_true',\n help=\"Enable gradient checkpointing.\",\n )\n parser.add_argument(\n \"--local-loss\",\n default=False,\n action=\"store_true\",\n help=\"calculate loss w/ local features @ global (instead of realizing full global @ global matrix)\"\n )\n parser.add_argument(\n \"--gather-with-grad\",\n default=False,\n action=\"store_true\",\n help=\"enable full distributed gradient for feature gather\"\n )\n parser.add_argument(\n '--force-image-size', type=int, nargs='+', default=None,\n help='Override default image size'\n )\n parser.add_argument(\n \"--force-quick-gelu\",\n default=False,\n action='store_true',\n help=\"Force use of QuickGELU activation for non-OpenAI transformer models.\",\n )\n parser.add_argument(\n \"--force-patch-dropout\",\n default=None,\n type=float,\n help=\"Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper\",\n )\n parser.add_argument(\n \"--force-custom-text\",\n default=False,\n action='store_true',\n help=\"Force use of CustomTextCLIP model (separate text-tower).\",\n )\n parser.add_argument(\n \"--torchscript\",\n default=False,\n action='store_true',\n help=\"torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'\",\n )\n 
parser.add_argument(\n \"--torchcompile\",\n default=False,\n action='store_true',\n help=\"torch.compile() the model, requires pytorch 2.0 or later.\",\n )\n parser.add_argument(\n \"--trace\",\n default=False,\n action='store_true',\n help=\"torch.jit.trace the model for inference / eval only\",\n )\n parser.add_argument(\n \"--accum-freq\", type=int, default=1, help=\"Update the model every --acum-freq steps.\"\n )\n # arguments for distributed training\n parser.add_argument(\n \"--dist-url\",\n default=\"env://\",\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--report-to\",\n default='',\n type=str,\n help=\"Options are ['wandb', 'tensorboard', 'wandb,tensorboard']\"\n )\n parser.add_argument(\n \"--wandb-notes\",\n default='',\n type=str,\n help=\"Notes if logging with wandb\"\n )\n parser.add_argument(\n \"--wandb-project-name\",\n type=str,\n default='open-clip',\n help=\"Name of the project if logging with wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"If true, more information is logged.\"\n )\n parser.add_argument(\n \"--copy-codebase\",\n default=False,\n action=\"store_true\",\n help=\"If true, we copy the entire base on the log directory, and execute from there.\"\n )\n parser.add_argument(\n \"--horovod\",\n default=False,\n action=\"store_true\",\n help=\"Use horovod for distributed training.\"\n )\n parser.add_argument(\n \"--ddp-static-graph\",\n default=False,\n action='store_true',\n help=\"Enable static graph optimization for DDP in PyTorch >= 1.11.\",\n )\n parser.add_argument(\n \"--no-set-device-rank\",\n default=False,\n action=\"store_true\",\n help=\"Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=0, help=\"Default random seed.\"\n )\n parser.add_argument(\n \"--grad-clip-norm\", type=float, default=None, help=\"Gradient clip.\"\n )\n parser.add_argument(\n \"--lock-text\",\n default=False,\n action='store_true',\n help=\"Lock full text tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-text-unlocked-layers\",\n type=int,\n default=0,\n help=\"Leave last n text tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-text-freeze-layer-norm\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in text tower for any locked layers.\",\n )\n parser.add_argument(\n \"--log-every-n-steps\",\n type=int,\n default=100,\n help=\"Log every n steps to tensorboard/console/wandb.\",\n )\n parser.add_argument(\n \"--coca-caption-loss-weight\",\n type=float,\n default=2.0,\n help=\"Weight assigned to caption loss in CoCa.\"\n )\n parser.add_argument(\n \"--coca-contrastive-loss-weight\",\n type=float,\n default=1.0,\n help=\"Weight assigned to contrastive loss when training CoCa.\"\n )\n parser.add_argument(\n \"--remote-sync\",\n type=str,\n default=None,\n help=\"Optinoally sync with a remote path specified by this arg\",\n )\n parser.add_argument(\n \"--remote-sync-frequency\",\n type=int,\n default=300,\n help=\"How frequently to sync to a remote directly if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--remote-sync-protocol\",\n choices=[\"s3\", \"fsspec\"],\n default=\"s3\",\n help=\"How to do the remote sync backup if --remote-sync is not None.\",\n )\n parser.add_argument(\n 
\"--delete-previous-checkpoint\",\n default=False,\n action=\"store_true\",\n help=\"If true, delete previous checkpoint after storing a new one.\"\n )\n parser.add_argument(\n \"--distill-model\",\n default=None,\n help='Which model arch to distill from, if any.'\n )\n parser.add_argument(\n \"--distill-pretrained\",\n default=None,\n help='Which pre-trained weights to distill from, if any.'\n )\n parser.add_argument(\n \"--use-bnb-linear\",\n default=None,\n help='Replace the network linear layers from the bitsandbytes library. '\n 'Allows int8 training/inference, etc.'\n )\n args = parser.parse_args(args)\n\n # If some params are not passed, we use the default values based on model name.\n default_params = get_default_params(args.model)\n for name, val in default_params.items():\n if getattr(args, name) is None:\n setattr(args, name, val)\n\n return args"
},
{
"identifier": "cosine_lr",
"path": "src/training/scheduler.py",
"snippet": "def cosine_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n e = step - warmup_length\n es = steps - warmup_length\n lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster"
},
{
"identifier": "const_lr",
"path": "src/training/scheduler.py",
"snippet": "def const_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n lr = base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster"
},
{
"identifier": "const_lr_cooldown",
"path": "src/training/scheduler.py",
"snippet": "def const_lr_cooldown(optimizer, base_lr, warmup_length, steps, cooldown_steps, cooldown_power=1.0, cooldown_end_lr=0.):\n def _lr_adjuster(step):\n start_cooldown_step = steps - cooldown_steps\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n if step < start_cooldown_step:\n lr = base_lr\n else:\n e = step - start_cooldown_step\n es = steps - start_cooldown_step\n # linear decay if power == 1; polynomial decay otherwise;\n decay = (1 - (e/es)) ** cooldown_power\n lr = decay * (base_lr - cooldown_end_lr) + cooldown_end_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster"
},
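A quick usage sketch for the scheduler factories above (assuming they import from src.training.scheduler as laid out in this record): each factory returns a callable that assigns and returns the learning rate for a given global step.

```python
import torch
from src.training.scheduler import cosine_lr  # assumed import path from this repo layout

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=1.0)

scheduler = cosine_lr(opt, base_lr=1e-3, warmup_length=100, steps=1000)
for step in (0, 50, 100, 500, 999):
    lr = scheduler(step)   # sets opt.param_groups[0]["lr"] and returns the value
    print(step, lr)        # linear warmup for 100 steps, then cosine decay toward 0
```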
{
"identifier": "train_one_epoch",
"path": "src/training/train.py",
"snippet": "def train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=None):\n device = torch.device(args.device)\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n\n model.train()\n if args.distill:\n dist_model.eval()\n\n data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch\n dataloader = data['train'].dataloader\n num_batches_per_epoch = dataloader.num_batches // args.accum_freq\n sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))\n\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n losses_m = {}\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n for i, batch in enumerate(dataloader):\n i_accum = i // args.accum_freq\n step = num_batches_per_epoch * epoch + i_accum\n\n if not args.skip_scheduler:\n scheduler(step)\n\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n data_time_m.update(time.time() - end)\n optimizer.zero_grad()\n\n if args.accum_freq == 1:\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out[\"logit_scale\"]\n if args.distill:\n with torch.no_grad():\n dist_model_out = dist_model(images, texts)\n model_out.update({f'dist_{k}' : v for k, v in dist_model_out.items()})\n losses = loss(**model_out, output_dict=True)\n\n total_loss = sum(losses.values())\n losses[\"loss\"] = total_loss\n\n backward(total_loss, scaler)\n else:\n # First, cache the features without any gradient tracking.\n with torch.no_grad():\n with autocast():\n model_out = model(images, texts)\n model_out.pop(\"logit_scale\")\n for key, val in model_out.items():\n if key in accum_features:\n accum_features[key].append(val)\n else:\n accum_features[key] = [val]\n\n accum_images.append(images)\n accum_texts.append(texts)\n\n # If (i + 1) % accum_freq is not zero, move on to the next batch.\n if ((i + 1) % args.accum_freq) > 0:\n # FIXME this makes data time logging unreliable when accumulating\n continue\n\n # Now, ready to take gradients for the last accum_freq batches.\n # Re-do the forward pass for those batches, and use the cached features from the other batches as negatives.\n # Call backwards each time, but only step optimizer at the end.\n optimizer.zero_grad()\n for j in range(args.accum_freq):\n images = accum_images[j]\n texts = accum_texts[j]\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out.pop(\"logit_scale\")\n inputs = {}\n for key, val in accum_features.items():\n accumulated = accum_features[key]\n inputs[key] = torch.cat(accumulated[:j] + [model_out[key]] + accumulated[j + 1:])\n losses = loss(**inputs, logit_scale=logit_scale, output_dict=True)\n del inputs\n total_loss = sum(losses.values())\n losses[\"loss\"] = total_loss\n backward(total_loss, scaler)\n\n if scaler is not None:\n if args.horovod:\n optimizer.synchronize()\n scaler.unscale_(optimizer)\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n with optimizer.skip_synchronize():\n scaler.step(optimizer)\n else:\n if args.grad_clip_norm is not None:\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n scaler.step(optimizer)\n scaler.update()\n else:\n if args.grad_clip_norm is not None:\n 
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n optimizer.step()\n\n # reset gradient accum, if enabled\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n # Note: we clamp to 4.6052 = ln(100), as in the original paper.\n with torch.no_grad():\n unwrap_model(model).logit_scale.clamp_(0, math.log(100))\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i_accum + 1\n if is_master(args) and (i_accum % args.log_every_n_steps == 0 or batch_count == num_batches_per_epoch):\n batch_size = len(images)\n num_samples = batch_count * batch_size * args.accum_freq * args.world_size\n samples_per_epoch = dataloader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n for key, val in losses.items():\n if key not in losses_m:\n losses_m[key] = AverageMeter()\n losses_m[key].update(val.item(), batch_size)\n\n logit_scale_scalar = logit_scale.item()\n loss_log = \" \".join(\n [\n f\"{loss_name.capitalize()}: {loss_m.val:#.5g} ({loss_m.avg:#.5g})\" \n for loss_name, loss_m in losses_m.items()\n ]\n )\n samples_per_second = args.accum_freq * args.batch_size * args.world_size / batch_time_m.val\n samples_per_second_per_gpu = args.accum_freq * args.batch_size / batch_time_m.val\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {samples_per_second:#g}/s, {samples_per_second_per_gpu:#g}/s/gpu \"\n f\"LR: {optimizer.param_groups[0]['lr']:5f} \"\n f\"Logit Scale: {logit_scale_scalar:.3f} \" + loss_log\n )\n\n # Save train loss / etc. Using non avg meter values as loggers have their own smoothing\n log_data = {\n \"data_time\": data_time_m.val,\n \"batch_time\": batch_time_m.val,\n \"samples_per_second\": samples_per_second,\n \"samples_per_second_per_gpu\": samples_per_second_per_gpu,\n \"scale\": logit_scale_scalar,\n \"lr\": optimizer.param_groups[0][\"lr\"]\n } \n log_data.update({name:val.val for name,val in losses_m.items()})\n\n for name, val in log_data.items():\n name = \"train/\" + name\n if tb_writer is not None:\n tb_writer.add_scalar(name, val, step)\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n wandb.log({name: val, 'step': step})\n\n # resetting batch / data time meters per log window\n batch_time_m.reset()\n data_time_m.reset()\n # end for"
},
{
"identifier": "evaluate",
"path": "src/training/train.py",
"snippet": "def evaluate(model, data, epoch, args, tb_writer=None):\n metrics = {}\n if not is_master(args):\n return metrics\n device = torch.device(args.device)\n model.eval()\n\n zero_shot_metrics = zero_shot_eval(model, data, epoch, args)\n metrics.update(zero_shot_metrics)\n\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):\n dataloader = data['val'].dataloader\n num_samples = 0\n samples_per_val = dataloader.num_samples\n\n # FIXME this does not scale past small eval datasets\n # all_image_features @ all_text_features will blow up memory and compute very quickly\n cumulative_loss = 0.0\n cumulative_gen_loss = 0.0\n all_image_features, all_text_features = [], []\n with torch.no_grad():\n for i, batch in enumerate(dataloader):\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n with autocast():\n model_out = model(images, texts)\n image_features = model_out[\"image_features\"]\n text_features = model_out[\"text_features\"]\n logit_scale = model_out[\"logit_scale\"]\n # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly\n # however, system RAM is easily exceeded and compute time becomes problematic\n all_image_features.append(image_features.cpu())\n all_text_features.append(text_features.cpu())\n logit_scale = logit_scale.mean()\n logits_per_image = logit_scale * image_features @ text_features.t()\n logits_per_text = logits_per_image.t()\n\n batch_size = images.shape[0]\n labels = torch.arange(batch_size, device=device).long()\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n gen_loss = maybe_compute_generative_loss(model_out)\n\n cumulative_loss += total_loss * batch_size\n num_samples += batch_size\n if is_master(args) and (i % 100) == 0:\n logging.info(\n f\"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\\t\"\n f\"Clip Loss: {cumulative_loss / num_samples:.6f}\\t\")\n\n if gen_loss is not None:\n cumulative_gen_loss += gen_loss * batch_size\n logging.info(\n f\"Generative Loss: {cumulative_gen_loss / num_samples:.6f}\\t\")\n\n val_metrics = get_clip_metrics(\n image_features=torch.cat(all_image_features),\n text_features=torch.cat(all_text_features),\n logit_scale=logit_scale.cpu(),\n )\n loss = cumulative_loss / num_samples\n metrics.update(\n {**val_metrics, \"clip_val_loss\": loss.item(), \"epoch\": epoch, \"num_samples\": num_samples}\n )\n if gen_loss is not None:\n gen_loss = cumulative_gen_loss / num_samples\n metrics.update({\"val_generative_loss\": gen_loss.item()})\n\n if not metrics:\n return metrics\n\n logging.info(\n f\"Eval Epoch: {epoch} \"\n + \"\\t\".join([f\"{k}: {round(v, 4):.4f}\" for k, v in metrics.items()])\n )\n\n if args.save_logs:\n for name, val in metrics.items():\n if tb_writer is not None:\n tb_writer.add_scalar(f\"val/{name}\", val, epoch)\n\n with open(os.path.join(args.checkpoint_path, \"results.jsonl\"), \"a+\") as f:\n f.write(json.dumps(metrics))\n f.write(\"\\n\")\n\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n for name, val in metrics.items():\n wandb.log({f\"val/{name}\": val, 'epoch': epoch})\n\n return metrics"
},
{
"identifier": "pt_load",
"path": "src/training/file_utils.py",
"snippet": "def pt_load(file_path, map_location=None):\n if file_path.startswith('s3'):\n logging.info('Loading remote checkpoint, which may take a bit.')\n of = fsspec.open(file_path, \"rb\")\n with of as f:\n out = torch.load(f, map_location=map_location)\n return out"
},
{
"identifier": "check_exists",
"path": "src/training/file_utils.py",
"snippet": "def check_exists(file_path):\n try:\n with fsspec.open(file_path):\n pass\n except FileNotFoundError:\n return False\n return True"
},
{
"identifier": "start_sync_process",
"path": "src/training/file_utils.py",
"snippet": "def start_sync_process(sync_every, local_dir, remote_dir, protocol):\n p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol))\n return p"
},
{
"identifier": "remote_sync",
"path": "src/training/file_utils.py",
"snippet": "def remote_sync(local_dir, remote_dir, protocol):\n logging.info('Starting remote sync.')\n if protocol == 's3':\n return remote_sync_s3(local_dir, remote_dir)\n elif protocol == 'fsspec':\n return remote_sync_fsspec(local_dir, remote_dir)\n else:\n logging.error('Remote protocol not known')\n return False"
}
] | import glob
import logging
import os
import re
import subprocess
import sys
import random
import numpy as np
import torch
import wandb
import torch.utils.tensorboard as tensorboard
import horovod.torch as hvd
import bitsandbytes as bnb
from datetime import datetime
from torch import optim
from torch.cuda.amp import GradScaler
from src.open_clip.factory import create_model_and_transforms, get_tokenizer, create_loss
from src.open_clip.model import trace_model
from src.training.data import get_data
from src.training.distributed import is_master, init_distributed_device, broadcast_object
from src.training.logger import setup_logging
from src.training.params import parse_args
from src.training.scheduler import cosine_lr, const_lr, const_lr_cooldown
from src.training.train import train_one_epoch, evaluate
from src.training.file_utils import pt_load, check_exists, start_sync_process, remote_sync
from open_clip.utils import replace_linear
from open_clip.utils import convert_int8_model_to_inference_mode
from shutil import copytree, ignore_patterns | 10,346 | """
Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
"""
try:
except ImportError:
wandb = None
try:
except ImportError:
tensorboard = None
try:
except ImportError:
hvd = None
LATEST_CHECKPOINT_NAME = "epoch_latest.pt"
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def get_latest_checkpoint(path: str, remote : bool):
    # as written, this glob recurses, so it can pick up checkpoints across multiple sub-folders
if remote:
result = subprocess.run(["aws", "s3", "ls", path + "/"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(result)
if result.returncode == 1:
return None
checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\n')[:-1]]
else:
checkpoints = glob.glob(path + '**/*.pt', recursive=True)
if checkpoints:
checkpoints = sorted(checkpoints, key=natural_key)
return checkpoints[-1]
return None
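# Illustrative note (not part of the upstream open_clip code): natural_key makes the sort
# treat embedded numbers numerically, e.g.
#     sorted(["epoch_10.pt", "epoch_2.pt"], key=natural_key) -> ["epoch_2.pt", "epoch_10.pt"]
# so get_latest_checkpoint can simply take checkpoints[-1] as the highest-numbered epoch.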
def main(args):
args = parse_args(args)
if torch.cuda.is_available():
# This enables tf32 on Ampere GPUs which is only 8% slower than
# float16 and almost as accurate as float32
# This was a default in pytorch until 1.12
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
# fully initialize distributed device environment
device = init_distributed_device(args)
    # get the name of the experiment
if args.name is None:
# sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
model_name_safe = args.model.replace('/', '-')
date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.distributed:
# sync date_str from master to all ranks
date_str = broadcast_object(args, date_str)
args.name = '-'.join([
date_str,
f"model_{model_name_safe}",
f"lr_{args.lr}",
f"b_{args.batch_size}",
f"j_{args.workers}",
f"p_{args.precision}",
])
resume_latest = args.resume == 'latest'
log_base_path = os.path.join(args.logs, args.name)
args.log_path = None
if is_master(args, local=args.log_local):
os.makedirs(log_base_path, exist_ok=True)
log_filename = f'out-{args.rank}' if args.log_local else 'out.log'
args.log_path = os.path.join(log_base_path, log_filename)
if os.path.exists(args.log_path) and not resume_latest:
print(
"Error. Experiment already exists. Use --name {} to specify a new experiment."
)
return -1
# Setup text logger
args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
# Setup wandb, tensorboard, checkpoint logging
args.wandb = 'wandb' in args.report_to or 'all' in args.report_to
args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to
args.checkpoint_path = os.path.join(log_base_path, "checkpoints")
if is_master(args):
args.tensorboard_path = os.path.join(log_base_path, "tensorboard") if args.tensorboard else ''
for dirname in [args.tensorboard_path, args.checkpoint_path]:
if dirname:
os.makedirs(dirname, exist_ok=True)
else:
args.tensorboard_path = ''
if resume_latest:
resume_from = None
checkpoint_path = args.checkpoint_path
# If using remote_sync, need to check the remote instead of the local checkpoints folder.
| """
Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
"""
try:
    import wandb
except ImportError:
    wandb = None
try:
    import torch.utils.tensorboard as tensorboard
except ImportError:
    tensorboard = None
try:
    import horovod.torch as hvd
except ImportError:
    hvd = None
LATEST_CHECKPOINT_NAME = "epoch_latest.pt"
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def get_latest_checkpoint(path: str, remote: bool):
    # as written, this glob recurses, so it can pick up checkpoints across multiple sub-folders
if remote:
result = subprocess.run(["aws", "s3", "ls", path + "/"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(result)
if result.returncode == 1:
return None
checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\n')[:-1]]
else:
checkpoints = glob.glob(path + '**/*.pt', recursive=True)
if checkpoints:
checkpoints = sorted(checkpoints, key=natural_key)
return checkpoints[-1]
return None
def main(args):
args = parse_args(args)
if torch.cuda.is_available():
# This enables tf32 on Ampere GPUs which is only 8% slower than
# float16 and almost as accurate as float32
# This was a default in pytorch until 1.12
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
# fully initialize distributed device environment
device = init_distributed_device(args)
    # get the name of the experiment
if args.name is None:
# sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
model_name_safe = args.model.replace('/', '-')
date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.distributed:
# sync date_str from master to all ranks
date_str = broadcast_object(args, date_str)
args.name = '-'.join([
date_str,
f"model_{model_name_safe}",
f"lr_{args.lr}",
f"b_{args.batch_size}",
f"j_{args.workers}",
f"p_{args.precision}",
])
resume_latest = args.resume == 'latest'
log_base_path = os.path.join(args.logs, args.name)
args.log_path = None
if is_master(args, local=args.log_local):
os.makedirs(log_base_path, exist_ok=True)
log_filename = f'out-{args.rank}' if args.log_local else 'out.log'
args.log_path = os.path.join(log_base_path, log_filename)
if os.path.exists(args.log_path) and not resume_latest:
print(
"Error. Experiment already exists. Use --name {} to specify a new experiment."
)
return -1
# Setup text logger
args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
# Setup wandb, tensorboard, checkpoint logging
args.wandb = 'wandb' in args.report_to or 'all' in args.report_to
args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to
args.checkpoint_path = os.path.join(log_base_path, "checkpoints")
if is_master(args):
args.tensorboard_path = os.path.join(log_base_path, "tensorboard") if args.tensorboard else ''
for dirname in [args.tensorboard_path, args.checkpoint_path]:
if dirname:
os.makedirs(dirname, exist_ok=True)
else:
args.tensorboard_path = ''
if resume_latest:
resume_from = None
checkpoint_path = args.checkpoint_path
# If using remote_sync, need to check the remote instead of the local checkpoints folder. | if args.remote_sync is not None: | 18 | 2023-12-19 11:50:56+00:00 | 12k |
Lavreniuk/EVP | depth/models_depth/model.py | [
{
"identifier": "UNetWrapper",
"path": "evp/models.py",
"snippet": "class UNetWrapper(nn.Module):\n def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None:\n super().__init__()\n self.unet = unet\n self.attention_store = AttentionStore(base_size=base_size // 8, max_size=max_attn_size)\n self.size16 = base_size // 32\n self.size32 = base_size // 16\n self.size64 = base_size // 8\n self.use_attn = use_attn\n if self.use_attn:\n register_attention_control(unet, self.attention_store)\n register_hier_output(unet)\n self.attn_selector = attn_selector.split('+')\n\n def forward(self, *args, **kwargs):\n if self.use_attn:\n self.attention_store.reset()\n out_list = self.unet(*args, **kwargs)\n if self.use_attn:\n avg_attn = self.attention_store.get_average_attention()\n attn16, attn32, attn64 = self.process_attn(avg_attn)\n out_list[1] = torch.cat([out_list[1], attn16], dim=1)\n out_list[2] = torch.cat([out_list[2], attn32], dim=1)\n if attn64 is not None:\n out_list[3] = torch.cat([out_list[3], attn64], dim=1)\n return out_list[::-1]\n\n def process_attn(self, avg_attn):\n attns = {self.size16: [], self.size32: [], self.size64: []}\n for k in self.attn_selector:\n for up_attn in avg_attn[k]:\n size = int(math.sqrt(up_attn.shape[1]))\n attns[size].append(rearrange(up_attn, 'b (h w) c -> b c h w', h=size))\n attn16 = torch.stack(attns[self.size16]).mean(0)\n attn32 = torch.stack(attns[self.size32]).mean(0)\n if len(attns[self.size64]) > 0:\n attn64 = torch.stack(attns[self.size64]).mean(0)\n else:\n attn64 = None\n return attn16, attn32, attn64"
},
{
"identifier": "TextAdapterRefer",
"path": "evp/models.py",
"snippet": "class TextAdapterRefer(nn.Module):\n def __init__(self, text_dim=768):\n super().__init__()\n \n self.fc = nn.Sequential(\n nn.Linear(text_dim, text_dim),\n nn.GELU(),\n nn.Linear(text_dim, text_dim)\n )\n\n def forward(self, latents, texts, gamma):\n texts_after = self.fc(texts)\n texts = texts + gamma * texts_after\n return texts"
},
{
"identifier": "FrozenCLIPEmbedder",
"path": "evp/models.py",
"snippet": "class FrozenCLIPEmbedder(nn.Module):\n \"\"\"Uses the CLIP transformer encoder for text (from Hugging Face)\"\"\"\n def __init__(self, version=\"openai/clip-vit-large-patch14\", device=\"cuda\", max_length=77, pool=True):\n super().__init__()\n self.tokenizer = CLIPTokenizer.from_pretrained(version)\n self.transformer = CLIPTextModel.from_pretrained(version)\n self.device = device\n self.max_length = max_length\n self.freeze()\n\n self.pool = pool\n\n def freeze(self):\n self.transformer = self.transformer.eval()\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, text):\n batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,\n return_overflowing_tokens=False, padding=\"max_length\", return_tensors=\"pt\")\n tokens = batch_encoding[\"input_ids\"].to(self.device)\n outputs = self.transformer(input_ids=tokens)\n\n if self.pool:\n z = outputs.pooler_output\n else:\n z = outputs.last_hidden_state\n return z\n\n def encode(self, text):\n return self(text)"
},
{
"identifier": "mViT",
"path": "depth/models_depth/miniViT.py",
"snippet": "class mViT(nn.Module):\n def __init__(self, in_channels, n_query_channels=128, patch_size=16, dim_out=256,\n embedding_dim=128, num_heads=4, norm='linear'):\n super(mViT, self).__init__()\n self.norm = norm\n self.n_query_channels = n_query_channels\n self.patch_transformer = PatchTransformerEncoder(in_channels, patch_size, embedding_dim, num_heads)\n self.dot_product_layer = PixelWiseDotProduct()\n\n self.conv3x3 = nn.Conv2d(in_channels, embedding_dim, kernel_size=3, stride=1, padding=1)\n self.regressor = nn.Sequential(nn.Linear(embedding_dim, 256),\n nn.LeakyReLU(),\n nn.Linear(256, 256),\n nn.LeakyReLU(),\n nn.Linear(256, dim_out))\n\n def forward(self, x):\n # n, c, h, w = x.size()\n tgt = self.patch_transformer(x.clone()) # .shape = S, N, E\n\n x = self.conv3x3(x)\n\n regression_head, queries = tgt[0, ...], tgt[1:self.n_query_channels + 1, ...]\n\n # Change from S, N, E to N, S, E\n queries = queries.permute(1, 0, 2)\n range_attention_maps = self.dot_product_layer(x, queries) # .shape = n, n_query_channels, h, w\n\n y = self.regressor(regression_head) # .shape = N, dim_out\n if self.norm == 'linear':\n y = torch.relu(y)\n eps = 0.1\n y = y + eps\n elif self.norm == 'softmax':\n return torch.softmax(y, dim=1), range_attention_maps\n else:\n y = torch.sigmoid(y)\n y = y / y.sum(dim=1, keepdim=True)\n return y, range_attention_maps"
},
{
"identifier": "AttractorLayer",
"path": "depth/models_depth/attractor.py",
"snippet": "class AttractorLayer(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n eps = 1e-3\n A = A + eps\n n, c, h, w = A.shape\n A = A.view(n, self.n_attractors, 2, h, w)\n A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w\n A_normed = A[:, :, 0, ...] # n, na, h, w\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(dist(A_normed.unsqueeze(\n 2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n # .shape N, nbins, h, w\n delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = (self.max_depth - self.min_depth) * \\\n b_new_centers + self.min_depth\n B_centers, _ = torch.sort(B_centers, dim=1)\n B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)\n return b_new_centers, B_centers"
},
{
"identifier": "AttractorLayerUnnormed",
"path": "depth/models_depth/attractor.py",
"snippet": "class AttractorLayerUnnormed(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are unbounded\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n n, c, h, w = A.shape\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(\n dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n delta_c += dist(A[:, i, ...].unsqueeze(1) -\n b_centers) # .shape N, nbins, h, w\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = b_new_centers\n\n return b_new_centers, B_centers"
},
{
"identifier": "ConditionalLogBinomial",
"path": "depth/models_depth/dist_layers.py",
"snippet": "class ConditionalLogBinomial(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),\n nn.Softplus()\n )\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n p, t = pt[:, :2, ...], pt[:, 2:, ...]\n\n p = p + self.p_eps\n p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])\n\n t = t + self.p_eps\n t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])\n t = t.unsqueeze(1)\n t = (self.max_temp - self.min_temp) * t + self.min_temp\n\n return self.log_binomial_transform(p, t)"
},
{
"identifier": "Projector",
"path": "depth/models_depth/localbins_layers.py",
"snippet": "class Projector(nn.Module):\n def __init__(self, in_features, out_features, mlp_dim=128):\n \"\"\"Projector MLP\n\n Args:\n in_features (int): input channels\n out_features (int): output channels\n mlp_dim (int, optional): hidden dimension. Defaults to 128.\n \"\"\"\n super().__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, out_features, 1, 1, 0),\n )\n\n def forward(self, x):\n return self._net(x)"
},
{
"identifier": "SeedBinRegressor",
"path": "depth/models_depth/localbins_layers.py",
"snippet": "class SeedBinRegressor(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Min depth value. Defaults to 1e-3.\n max_depth (float, optional): Max depth value. Defaults to 10.\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self.min_depth = min_depth\n self.max_depth = max_depth\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B = self._net(x)\n eps = 1e-3\n B = B + eps\n B_widths_normed = B / B.sum(dim=1, keepdim=True)\n B_widths = (self.max_depth - self.min_depth) * \\\n B_widths_normed # .shape NCHW\n # pad has the form (left, right, top, bottom, front, back)\n B_widths = nn.functional.pad(\n B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)\n B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW\n\n B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])\n return B_widths_normed, B_centers"
},
{
"identifier": "SeedBinRegressorUnnormed",
"path": "depth/models_depth/localbins_layers.py",
"snippet": "class SeedBinRegressorUnnormed(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are unbounded\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B_centers = self._net(x)\n return B_centers, B_centers"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from timm.models.layers import trunc_normal_, DropPath
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,
constant_init, normal_init)
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from evp.models import UNetWrapper, TextAdapterRefer, FrozenCLIPEmbedder
from .miniViT import mViT
from .attractor import AttractorLayer, AttractorLayerUnnormed
from .dist_layers import ConditionalLogBinomial
from .localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed) | 7,438 | x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
# Upsample
x = self.upscale(x)
return x
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvLayer, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.GroupNorm(20, out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.conv1(x)
return x
class InverseMultiAttentiveFeatureRefinement(nn.Module):
def __init__(self, in_channels_list):
super(InverseMultiAttentiveFeatureRefinement, self).__init__()
self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0])
self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2)
self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1])
self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2)
self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2])
self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2)
self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3])
'''
self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3])
self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2])
self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2])
self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1])
self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1])
self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0])
'''
def forward(self, inputs):
x_c4, x_c3, x_c2, x_c1 = inputs
x_c4 = self.layer1(x_c4)
x_c4_3 = self.layer2(x_c4)
x_c3 = torch.cat([x_c4_3, x_c3], dim=1)
x_c3 = self.layer3(x_c3)
x_c3_2 = self.layer4(x_c3)
x_c2 = torch.cat([x_c3_2, x_c2], dim=1)
x_c2 = self.layer5(x_c2)
x_c2_1 = self.layer6(x_c2)
x_c1 = torch.cat([x_c2_1, x_c1], dim=1)
x_c1 = self.layer7(x_c1)
'''
x_c1_2 = self.layer8(x_c1)
x_c2 = torch.cat([x_c1_2, x_c2], dim=1)
x_c2 = self.layer9(x_c2)
x_c2_3 = self.layer10(x_c2)
x_c3 = torch.cat([x_c2_3, x_c3], dim=1)
x_c3 = self.layer11(x_c3)
x_c3_4 = self.layer12(x_c3)
x_c4 = torch.cat([x_c3_4, x_c4], dim=1)
x_c4 = self.layer13(x_c4)
'''
return [x_c4, x_c3, x_c2, x_c1]
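# --- Illustrative shape check (not part of the original model; batch size and spatial
# sizes are hypothetical). The refinement block takes four feature maps ordered from
# finest to coarsest with channel widths [320, 680, 1320, 1280]; each stage gates the
# finer map with attention, downsamples it, and fuses it into the next coarser map,
# so the returned list keeps the original channel counts.
def _demo_refinement_shapes():
    agg = InverseMultiAttentiveFeatureRefinement([320, 680, 1320, 1280])
    feats = [
        torch.randn(1, 320, 64, 64),   # x_c4 (finest)
        torch.randn(1, 680, 32, 32),   # x_c3
        torch.randn(1, 1320, 16, 16),  # x_c2
        torch.randn(1, 1280, 8, 8),    # x_c1 (coarsest)
    ]
    return [o.shape for o in agg(feats)]  # channels stay 320 / 680 / 1320 / 1280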
class EVPDepthEncoder(nn.Module):
def __init__(self, out_dim=1024, ldm_prior=[320, 680, 1320+1280], sd_path=None, text_dim=768,
dataset='nyu', caption_aggregation=False
):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
nn.GroupNorm(16, ldm_prior[0]),
nn.ReLU(),
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
)
self.layer2 = nn.Sequential(
nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
)
self.out_layer = nn.Sequential(
nn.Conv2d(sum(ldm_prior), out_dim, 1),
nn.GroupNorm(16, out_dim),
nn.ReLU(),
)
self.aggregation = InverseMultiAttentiveFeatureRefinement([320, 680, 1320, 1280])
self.apply(self._init_weights)
### stable diffusion layers
config = OmegaConf.load('./v1-inference.yaml')
if sd_path is None:
if os.path.exists('../checkpoints/v1-5-pruned-emaonly.ckpt'):
config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt'
else:
config.model.params.ckpt_path = None
else:
config.model.params.ckpt_path = f'../{sd_path}'
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
| # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The deconvolution code is based on Simple Baseline.
# (https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py)
# Modified by Zigang Geng ([email protected]).
# ------------------------------------------------------------------------------
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"""
Checkerboard artifact free sub-pixel convolution
https://arxiv.org/abs/1707.02937
"""
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
x.data.copy_(k)
class PixelShuffle(nn.Module):
"""
Real-Time Single Image and Video Super-Resolution
https://arxiv.org/abs/1609.05158
"""
def __init__(self, n_channels, scale):
super(PixelShuffle, self).__init__()
self.conv = nn.Conv2d(n_channels, n_channels*(scale**2), kernel_size=1)
icnr(self.conv.weight)
self.shuf = nn.PixelShuffle(scale)
self.relu = nn.ReLU()
def forward(self,x):
x = self.shuf(self.relu(self.conv(x)))
return x
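# --- Illustrative sketch (not from the original code; the helper name and sizes are
# hypothetical). PixelShuffle above expands the channel dimension by scale**2 with a
# 1x1 conv whose weights are ICNR-initialised to avoid checkerboard artifacts, then
# nn.PixelShuffle folds those channels back into space: spatial resolution doubles
# while the channel count is unchanged for scale=2.
def _demo_pixel_shuffle():
    up = PixelShuffle(n_channels=8, scale=2)
    x = torch.randn(1, 8, 16, 16)
    return up(x).shape  # torch.Size([1, 8, 32, 32])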
class AttentionModule(nn.Module):
def __init__(self, in_channels, out_channels):
super(AttentionModule, self).__init__()
# Convolutional Layers
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU()
# Spatial Attention
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply convolutional layer
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
return x
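# --- Illustrative sketch (not part of the original code; sizes are hypothetical).
# AttentionModule gates every channel of the input with a single-channel sigmoid map
# (per-pixel spatial attention) before the 3x3 conv, so the output keeps the spatial
# size and takes its channel count from out_channels.
def _demo_attention_module():
    m = AttentionModule(in_channels=40, out_channels=40)
    x = torch.randn(2, 40, 24, 24)
    return m(x).shape  # torch.Size([2, 40, 24, 24])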
class AttentionDownsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=2):
super(AttentionDownsamplingModule, self).__init__()
# Spatial Attention
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
# Channel Attention
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // 8, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels // 8, in_channels, kernel_size=1),
nn.Sigmoid()
)
# Convolutional Layers
if scale_factor == 2:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
elif scale_factor == 4:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
return x
class AttentionUpsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels):
super(AttentionUpsamplingModule, self).__init__()
# Spatial Attention for outs[2]
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
# Channel Attention for outs[2]
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // 8, kernel_size=1),
nn.ReLU(),
nn.Conv2d(in_channels // 8, in_channels, kernel_size=1),
nn.Sigmoid()
)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU()
self.upscale = PixelShuffle(in_channels, 2)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
# Upsample
x = self.upscale(x)
return x
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvLayer, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.GroupNorm(20, out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.conv1(x)
return x
class InverseMultiAttentiveFeatureRefinement(nn.Module):
def __init__(self, in_channels_list):
super(InverseMultiAttentiveFeatureRefinement, self).__init__()
self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0])
self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2)
self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1])
self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2)
self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2])
self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2)
self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3])
'''
self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3])
self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2])
self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2])
self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1])
self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1])
self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0])
'''
def forward(self, inputs):
x_c4, x_c3, x_c2, x_c1 = inputs
x_c4 = self.layer1(x_c4)
x_c4_3 = self.layer2(x_c4)
x_c3 = torch.cat([x_c4_3, x_c3], dim=1)
x_c3 = self.layer3(x_c3)
x_c3_2 = self.layer4(x_c3)
x_c2 = torch.cat([x_c3_2, x_c2], dim=1)
x_c2 = self.layer5(x_c2)
x_c2_1 = self.layer6(x_c2)
x_c1 = torch.cat([x_c2_1, x_c1], dim=1)
x_c1 = self.layer7(x_c1)
'''
x_c1_2 = self.layer8(x_c1)
x_c2 = torch.cat([x_c1_2, x_c2], dim=1)
x_c2 = self.layer9(x_c2)
x_c2_3 = self.layer10(x_c2)
x_c3 = torch.cat([x_c2_3, x_c3], dim=1)
x_c3 = self.layer11(x_c3)
x_c3_4 = self.layer12(x_c3)
x_c4 = torch.cat([x_c3_4, x_c4], dim=1)
x_c4 = self.layer13(x_c4)
'''
return [x_c4, x_c3, x_c2, x_c1]
class EVPDepthEncoder(nn.Module):
def __init__(self, out_dim=1024, ldm_prior=[320, 680, 1320+1280], sd_path=None, text_dim=768,
dataset='nyu', caption_aggregation=False
):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
nn.GroupNorm(16, ldm_prior[0]),
nn.ReLU(),
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
)
self.layer2 = nn.Sequential(
nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
)
self.out_layer = nn.Sequential(
nn.Conv2d(sum(ldm_prior), out_dim, 1),
nn.GroupNorm(16, out_dim),
nn.ReLU(),
)
self.aggregation = InverseMultiAttentiveFeatureRefinement([320, 680, 1320, 1280])
self.apply(self._init_weights)
### stable diffusion layers
config = OmegaConf.load('./v1-inference.yaml')
if sd_path is None:
if os.path.exists('../checkpoints/v1-5-pruned-emaonly.ckpt'):
config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt'
else:
config.model.params.ckpt_path = None
else:
config.model.params.ckpt_path = f'../{sd_path}'
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
| self.unet = UNetWrapper(sd_model.model, use_attn=True) | 0 | 2023-12-15 14:13:59+00:00 | 12k |
penghao-wu/vstar | LLaVA/llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "LLaVA/llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "LLaVA/llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "LLaVA/llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "LLaVA/llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "LLaVA/llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "LLaVA/llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "LLaVA/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "LLaVA/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "LLaVA/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "LLaVA/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "LLaVA/llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "LLaVA/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "LLaVA/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,399 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-12-15 14:58:24+00:00 | 12k |
worm128/AI-YinMei | text-generation-webui/extensions/Training_PRO/script.py | [
{
"identifier": "FPSchedulerTrainer",
"path": "text-generation-webui/extensions/Training_PRO/custom_scheduler.py",
"snippet": "class FPSchedulerTrainer(transformers.Trainer):\n def __init__(self,neftune_noise_alpha:float = 0.0, model = None, *args, **kwargs):\n self.neftune_noise_alpha = neftune_noise_alpha\n if self.neftune_noise_alpha > 0.0:\n model = self._activate_neftune(model)\n super().__init__(model = model, *args, **kwargs)\n\n \n def _activate_neftune(self, model):\n r\"\"\"\n Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914\n \"\"\"\n print(f\"Activating {RED}NEFtune{RESET} with scale: {self.neftune_noise_alpha}\")\n if isinstance(model, transformers.PreTrainedModel):\n embeddings = model.get_input_embeddings()\n elif isinstance(model, PeftModel):\n embeddings = model.base_model.get_input_embeddings()\n\n embeddings.neftune_noise_alpha = self.neftune_noise_alpha\n old_forward = embeddings.forward\n\n # This hack seems to be needed to properly use a custom forward pass\n # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11\n bound_method = neftune_forward.__get__(embeddings, embeddings.__class__)\n setattr(embeddings, \"forward\", bound_method)\n\n # embeddings.forward = neftune_forward\n embeddings._trl_old_forward = old_forward\n\n return model\n \n def train(self, *args, **kwargs):\n output = super().train(*args, **kwargs)\n\n # After training we make sure to retrieve back the original forward pass method\n # for the embedding layer\n if self.neftune_noise_alpha is not None:\n\n if isinstance(self.model, transformers.PreTrainedModel):\n embeddings = self.model.get_input_embeddings()\n elif isinstance(self.model, PeftModel):\n embeddings = self.model.base_model.get_input_embeddings()\n\n if hasattr(embeddings, \"_trl_old_forward\"):\n embeddings.forward = embeddings._trl_old_forward\n del embeddings._trl_old_forward\n del embeddings.neftune_noise_alpha\n\n return output\n\n\n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n #Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument.\n \n num_train_epochs = self.args.num_train_epochs\n num_warmup_steps=self.args.get_warmup_steps(num_training_steps)\n num_firstepoch_steps = math.ceil(num_training_steps/num_train_epochs)\n num_warmup_acc = num_warmup_steps*self.args.gradient_accumulation_steps \n num_firstepoch_steps_acc = num_firstepoch_steps*self.args.gradient_accumulation_steps\n num_training_steps_acc = num_training_steps*self.args.gradient_accumulation_steps\n\n custom_scheduler_params.update({'dynamic_scheduler_stop': False})\n \n print (f\"Warm-up steps aligned to Gradient accumulation ({self.args.gradient_accumulation_steps}) = {num_warmup_acc} actual warmup steps\")\n if self.args.lr_scheduler_type == 'cosine':\n \n num_warmup_acc_min = min(num_warmup_acc, num_firstepoch_steps_acc)\n\n if num_warmup_acc>num_firstepoch_steps_acc:\n print(f\"\\033[1;31;1mWARNING: The number of warmup steps is set too high! 
It will be clamped to 1 epoch, essentially going from warmup to annealing.\\033[0;37;0m\")\n print (f\"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}\")\n else:\n print (f\"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}\")\n\n self.lr_scheduler = custom_cosine_scheduler_with_warmup(\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps, \n num_firstepoch_steps = num_firstepoch_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler\n elif self.args.lr_scheduler_type == 'constant':\n \n half_step_acc = num_training_steps_acc//2\n num_warmup_acc_min = min(num_warmup_acc, half_step_acc)\n\n if num_warmup_acc>half_step_acc:\n print(f\"\\033[1;31;1mWARNING: The number of warmup steps is set too high! It will be clamped to half of all epochs, essentially going from warmup to annealing in the middle.\\033[0;37;0m\")\n print (f\"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}\")\n else:\n print (f\"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}\")\n\n self.lr_scheduler = custom_half_scheduler_with_warmup(\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps, \n num_firstepoch_steps = num_firstepoch_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler\n elif self.args.lr_scheduler_type == 'constant_with_warmup':\n \n half_step_acc = num_training_steps_acc//2\n \n if num_warmup_steps>0:\n print(f\"Warmup doesn't apply to this scheduler [Raise-Fall]\")\n\n print (f\"Scheduler Raise: 0-{half_step_acc}, Fall {half_step_acc}-{num_training_steps_acc}\")\n\n self.lr_scheduler = custom_raise_fall_scheduler_with_warmup(\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps, \n num_firstepoch_steps = num_firstepoch_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler \n else:\n return super().create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)"
},
{
"identifier": "FPNEFtuneTrainer",
"path": "text-generation-webui/extensions/Training_PRO/custom_scheduler.py",
"snippet": "class FPNEFtuneTrainer(transformers.Trainer):\n def __init__(self,neftune_noise_alpha:float = 0.0, model = None, *args, **kwargs):\n self.neftune_noise_alpha = neftune_noise_alpha\n if self.neftune_noise_alpha > 0.0:\n model = self._activate_neftune(model)\n super().__init__(model = model, *args, **kwargs)\n\n \n def _activate_neftune(self, model):\n r\"\"\"\n Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914\n \"\"\"\n print(f\"Activating {RED}NEFtune{RESET} with scale: {self.neftune_noise_alpha}\")\n if isinstance(model, transformers.PreTrainedModel):\n embeddings = model.get_input_embeddings()\n elif isinstance(model, PeftModel):\n embeddings = model.base_model.get_input_embeddings()\n\n embeddings.neftune_noise_alpha = self.neftune_noise_alpha\n old_forward = embeddings.forward\n\n # This hack seems to be needed to properly use a custom forward pass\n # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11\n bound_method = neftune_forward.__get__(embeddings, embeddings.__class__)\n setattr(embeddings, \"forward\", bound_method)\n\n # embeddings.forward = neftune_forward\n embeddings._trl_old_forward = old_forward\n\n return model\n \n def train(self, *args, **kwargs):\n output = super().train(*args, **kwargs)\n\n # After training we make sure to retrieve back the original forward pass method\n # for the embedding layer\n if self.neftune_noise_alpha is not None:\n\n if isinstance(self.model, transformers.PreTrainedModel):\n embeddings = self.model.get_input_embeddings()\n elif isinstance(self.model, PeftModel):\n embeddings = self.model.base_model.get_input_embeddings()\n\n if hasattr(embeddings, \"_trl_old_forward\"):\n embeddings.forward = embeddings._trl_old_forward\n del embeddings._trl_old_forward\n del embeddings.neftune_noise_alpha\n\n return output"
},
{
"identifier": "create_graph",
"path": "text-generation-webui/extensions/Training_PRO/matplotgraph.py",
"snippet": "def create_graph(lora_path, lora_name):\n try:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import ScalarFormatter\n \n peft_model_path = f'{lora_path}/training_graph.json'\n image_model_path = f'{lora_path}/training_graph.png'\n # Check if the JSON file exists\n if os.path.exists(peft_model_path):\n # Load data from JSON file\n with open(peft_model_path, 'r') as file:\n data = json.load(file)\n # Extract x, y1, and y2 values\n x = [item['epoch'] for item in data]\n y1 = [item['learning_rate'] for item in data]\n y2 = [item['loss'] for item in data]\n\n # Create the line chart\n fig, ax1 = plt.subplots(figsize=(10, 6))\n \n\n # Plot y1 (learning rate) on the first y-axis\n ax1.plot(x, y1, 'b-', label='Learning Rate')\n ax1.set_xlabel('Epoch')\n ax1.set_ylabel('Learning Rate', color='b')\n ax1.tick_params('y', colors='b')\n\n # Create a second y-axis\n ax2 = ax1.twinx()\n\n # Plot y2 (loss) on the second y-axis\n ax2.plot(x, y2, 'r-', label='Loss')\n ax2.set_ylabel('Loss', color='r')\n ax2.tick_params('y', colors='r')\n\n # Set the y-axis formatter to display numbers in scientific notation\n ax1.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))\n ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n\n # Add grid\n ax1.grid(True)\n\n # Combine the legends for both plots\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc='best')\n\n # Set the title\n plt.title(f'{lora_name} LR and Loss vs Epoch')\n\n # Save the chart as an image\n plt.savefig(image_model_path)\n\n print(f\"Graph saved in {image_model_path}\")\n else:\n print(f\"File 'training_graph.json' does not exist in the {lora_path}\")\n \n except ImportError:\n print(\"matplotlib is not installed. Please install matplotlib to create PNG graphs\")"
},
{
"identifier": "get_available_loras_local",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def get_available_loras_local(_sortedByTime):\n \n model_dir = shared.args.lora_dir # Update with the appropriate directory path\n subfolders = []\n if _sortedByTime:\n subfolders = list_subfoldersByTime(model_dir)\n else:\n subfolders = utils.get_available_loras() \n\n return subfolders"
},
{
"identifier": "precise_cut",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def precise_cut(text: str, overlap: bool, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer:bool):\n\n EOSX_str = '<//>' #hardcut placeholder\n EOS_str = '</s>' \n print(\"Precise raw text slicer: ON\")\n \n cut_string = hard_cut_string.replace('\\\\n', '\\n')\n text = text.replace(cut_string, EOSX_str)\n sentences = split_sentences(text, cutoff_len)\n\n print(f\"Sentences: {len(sentences)}\")\n sentencelist = []\n currentSentence = ''\n totalLength = 0\n max_cut = cutoff_len-1\n half_cut = cutoff_len//2\n halfcut_length = 0\n\n edgeindex = []\n half_index = 0\n\n for index, item in enumerate(sentences):\n \n if halfcut_length+ item['size'] < half_cut:\n halfcut_length += item['size']\n half_index = index\n else:\n edgeindex.append(half_index)\n halfcut_length = -2 * max_cut\n\n\n if totalLength + item['size'] < max_cut and not currentSentence.endswith(EOSX_str): \n currentSentence += item['text']\n totalLength += item['size']\n else:\n\n if len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip())\n\n currentSentence = item['text']\n totalLength = item['size']\n halfcut_length = item['size']\n \n if len(currentSentence.strip()) > min_chars_cut: \n sentencelist.append(currentSentence.strip())\n\n unique_blocks = len(sentencelist)\n print(f\"Text Blocks: {unique_blocks}\")\n\n #overlap strategies: \n # don't overlap across HARD CUT (EOSX)\n if overlap:\n for edge_idx in edgeindex:\n currentSentence = ''\n totalLength = 0\n\n for item in sentences[edge_idx:]:\n if totalLength + item['size'] < max_cut:\n currentSentence += item['text']\n totalLength += item['size']\n else:\n #if by chance EOSX is at the end then it's acceptable\n if currentSentence.endswith(EOSX_str) and len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip()) \n # otherwise don't cross hard cut \n elif EOSX_str not in currentSentence and len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip())\n \n currentSentence = ''\n totalLength = 0\n break\n \n print(f\"+ Overlapping blocks: {len(sentencelist)-unique_blocks}\")\n\n num_EOS = 0\n for i in range(len(sentencelist)):\n if eos_to_hc:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)\n else:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, '')\n \n #someone may have had stop strings in the raw text...\n sentencelist[i] = sentencelist[i].replace(\"</s></s>\", EOS_str)\n num_EOS += sentencelist[i].count(EOS_str)\n\n if num_EOS > 0:\n print(f\"+ EOS count: {num_EOS}\")\n\n #final check for useless lines\n sentencelist = [item for item in sentencelist if item.strip() != \"</s>\"]\n sentencelist = [item for item in sentencelist if item.strip() != \"\"]\n\n\n if debug_slicer:\n # Write the log file\n Path('logs').mkdir(exist_ok=True)\n sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}\n output_file = \"logs/sentencelist.json\"\n with open(output_file, 'w') as f:\n json.dump(sentencelist_dict, f,indent=2)\n \n print(\"Saved sentencelist.json in logs folder\")\n \n return sentencelist "
},
{
"identifier": "sliding_block_cut",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def sliding_block_cut(text: str, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer:bool):\n\n EOSX_str = '<//>' #hardcut placeholder\n EOS_str = '</s>' \n print(\"Mega Block Overlap: ON\")\n \n cut_string = hard_cut_string.replace('\\\\n', '\\n')\n text = text.replace(cut_string, EOSX_str)\n sentences = split_sentences(text, cutoff_len)\n\n print(f\"Sentences: {len(sentences)}\")\n sentencelist = []\n \n max_cut = cutoff_len-1\n\n #print(f\"max_cut: {max_cut}\")\n advancing_to = 0\n\n prev_block_lastsentence = \"\"\n \n\n for i in range(len(sentences)):\n totalLength = 0\n currentSentence = ''\n lastsentence = \"\"\n \n if i >= advancing_to:\n for k in range(i, len(sentences)):\n \n current_length = sentences[k]['size']\n\n if totalLength + current_length <= max_cut and not currentSentence.endswith(EOSX_str):\n currentSentence += sentences[k]['text']\n totalLength += current_length\n lastsentence = sentences[k]['text']\n else:\n if len(currentSentence.strip()) > min_chars_cut:\n if prev_block_lastsentence!=lastsentence:\n sentencelist.append(currentSentence.strip())\n prev_block_lastsentence = lastsentence\n \n advancing_to = 0\n if currentSentence.endswith(EOSX_str):\n advancing_to = k\n\n currentSentence = \"\"\n totalLength = 0\n break\n \n if currentSentence != \"\":\n if len(currentSentence.strip()) > min_chars_cut:\n sentencelist.append(currentSentence.strip())\n\n unique_blocks = len(sentencelist)\n print(f\"Text Blocks: {unique_blocks}\")\n num_EOS = 0\n for i in range(len(sentencelist)):\n if eos_to_hc:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)\n else:\n sentencelist[i] = sentencelist[i].replace(EOSX_str, '')\n \n #someone may have had stop strings in the raw text...\n sentencelist[i] = sentencelist[i].replace(\"</s></s>\", EOS_str)\n num_EOS += sentencelist[i].count(EOS_str)\n\n if num_EOS > 0:\n print(f\"+ EOS count: {num_EOS}\")\n\n #final check for useless lines\n sentencelist = [item for item in sentencelist if item.strip() != \"</s>\"]\n sentencelist = [item for item in sentencelist if item.strip() != \"\"]\n\n\n if debug_slicer:\n # Write the log file\n Path('logs').mkdir(exist_ok=True)\n sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}\n output_file = \"logs/sentencelist.json\"\n with open(output_file, 'w') as f:\n json.dump(sentencelist_dict, f,indent=2)\n \n print(\"Saved sentencelist.json in logs folder\")\n \n return sentencelist "
},
{
"identifier": "download_file_from_url",
"path": "text-generation-webui/extensions/Training_PRO/train_utils.py",
"snippet": "def download_file_from_url(url, overwrite, output_dir_in, valid_extensions = {'.txt', '.json'}):\n try:\n # Validate and sanitize the URL\n #parsed_url = urllib.parse.urlparse(url)\n #if not parsed_url.netloc:\n # raise ValueError(\"Invalid URL\")\n #filename = os.path.basename(parsed_url.path)\n\n # Get the filename from the URL\n\n session = requests.Session()\n headers = {}\n mode = 'wb'\n filename = url.split('/')[-1]\n\n output_dir = str(output_dir_in)\n # Construct the full path to the output file\n local_filename = os.path.join(output_dir, filename)\n\n # Check if the local file already exists\n overw = ''\n if os.path.exists(local_filename):\n if not overwrite:\n yield f\"File '{local_filename}' already exists. Aborting.\"\n return\n else:\n overw = ' [Overwrite existing]'\n\n filename_lower = filename.lower()\n\n # Send an HTTP GET request to the URL with a timeout\n file_extension = os.path.splitext(filename_lower)[-1]\n \n if file_extension not in valid_extensions:\n yield f\"Invalid file extension: {file_extension}. Only {valid_extensions} files are supported.\"\n return\n\n with session.get(url, stream=True, headers=headers, timeout=10) as r:\n r.raise_for_status() \n # total size can be wildly inaccurate\n #total_size = int(r.headers.get('content-length', 0))\n \n block_size = 1024 * 4 \n with open(local_filename, mode) as f:\n count = 0\n for data in r.iter_content(block_size):\n f.write(data)\n count += len(data)\n\n yield f\"Downloaded: {count} \" + overw\n\n # Verify file size if possible\n if os.path.exists(local_filename):\n downloaded_size = os.path.getsize(local_filename)\n if downloaded_size > 0:\n yield f\"File '{filename}' downloaded to '{output_dir}' ({downloaded_size} bytes).\"\n print(\"File Downloaded\")\n else:\n print(\"Downloaded file is zero\")\n yield f\"Failed. Downloaded file size is zero).\"\n else:\n print(f\"Error: {local_filename} failed to download.\")\n yield f\"Error: {local_filename} failed to download\"\n\n except Exception as e:\n print(f\"An error occurred: {e}\")\n yield f\"An error occurred: {e}\"\n\n finally:\n # Close the session to release resources\n session.close()"
}
] | import os
import json
import math
import random
import shutil
import sys
import threading
import time
import traceback
import gradio as gr
import pandas as pd
import torch
import transformers
import inspect
from datetime import datetime
from pathlib import Path
from functools import partial
from .custom_scheduler import FPSchedulerTrainer, FPNEFtuneTrainer
from .matplotgraph import create_graph
from .train_utils import get_available_loras_local, precise_cut, sliding_block_cut, download_file_from_url
from datasets import Dataset, load_dataset
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_kbit_training,
set_peft_model_state_dict
)
from peft.utils.other import \
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
)
from modules import shared, utils
from modules.ui import create_refresh_button
from modules.evaluate import (
calculate_perplexity,
generate_markdown_table,
save_past_evaluations
)
from modules.logging_colors import logger
from modules.models import reload_model
from modules.utils import natural_keys
from typing import Callable, Optional, Tuple, ContextManager
from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
replace_peft_model_with_int4_lora_model
)
from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
from alpaca_lora_4bit.models import Linear4bitLt | 8,744 |
def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, logs, **kwargs):
train_log.update(logs)
current_steps_offset = tracked.current_steps + non_serialized_params['checkpoint_offset']
current_epoch_offset = train_log.get('epoch', 0.0) + non_serialized_params['epoch_offset']
train_log.update({"current_steps": tracked.current_steps})
train_log.update({"current_steps_adjusted": current_steps_offset})
train_log.update({"epoch_adjusted": current_epoch_offset})
if WANT_INTERRUPT:
print("\033[1;31;1mInterrupted by user\033[0;37;0m")
if non_serialized_params['checkpoint_offset']>0:
print(f"\033[1;30;40mStep: {tracked.current_steps:6} [+{non_serialized_params['checkpoint_offset']}] \033[0;37;0m", end='')
else:
print(f"\033[1;30;40mStep: {tracked.current_steps:6} \033[0;37;0m", end='')
graphentry = {
'current_steps': int(train_log.get('current_steps_adjusted',0)),
'loss': float(train_log.get('loss', 0.0)),
'learning_rate': float(train_log.get('learning_rate', 0.0)),
'epoch': float(train_log.get('epoch_adjusted', 0.0))
}
cur_loss = float(train_log.get('loss', 0.0))
cur_lr = float(train_log.get('learning_rate', 0.0))
cur_epoch = float(train_log.get('epoch', 0.0))
if len(statistics['loss']) == 1:
first_epoch = statistics['loss'][0]['epoch']
first_value = statistics['loss'][0]['value']
if first_value ==0:
statistics['loss'] = []
statistics['loss'].append({'epoch': cur_epoch, 'value': cur_loss})
statistics['lr'].append({'epoch': cur_epoch, 'value': cur_lr})
# Add the entry to the continuous log
train_log_graph.append(graphentry)
# Save the graph log for now, we can later generate full graph
with open(f"{lora_file_path}/training_graph.json", 'w') as file:
json.dump(train_log_graph, file, indent=4)
if 'loss' in logs:
loss = float(logs['loss'])
if loss <= stop_at_loss:
control.should_epoch_stop = True
control.should_training_stop = True
print(f"{RED}Stop Loss {stop_at_loss} reached.{RESET}")
# FPHAM SAMPLE REQ Transformers error handling
gradient_accumulation_max = int(train_data.num_rows)//micro_batch_size
if gradient_accumulation_max < gradient_accumulation_steps:
print(f"{RED}WARNING:{RESET} Current gradient accumulation is {RED}too high{RESET} for the amount of training data.")
print(f"Gradient accumulation: {gradient_accumulation_steps} should be less than: {gradient_accumulation_max}. {RED}This could crash Accelerate/Transformers{RESET}")
#min_batchSize = sample_req*micro_batch_size
print(f"Preferable fix: {RED}Increase the size of dataset{RESET}")
print(f"... or Decrerase Gradient Accumulation {RED}{gradient_accumulation_steps}{RESET} to below {GREEN}{gradient_accumulation_max}{RESET}")
gradient_accumulation_steps = max(1,gradient_accumulation_max-1)
print(f"Last resort fix for this run: Lowering Gradient accumulation to {GREEN}{gradient_accumulation_steps}{RESET} [Good luck]")
else:
print(f"Data Size Check: Gradient accumulation: {YELLOW}{gradient_accumulation_steps}{RESET} <= Blocks/Batch {gradient_accumulation_max} ... {GREEN}[OK]{RESET}")
#END OF FPHAM SAMPLE REQ
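    # Worked example of the check above (illustrative numbers, not taken from any real run):
    # with train_data.num_rows = 100 and micro_batch_size = 4, gradient_accumulation_max = 100 // 4 = 25.
    # A requested gradient accumulation of 32 would trigger the warning and be clamped to
    # max(1, 25 - 1) = 24, so the accumulation window still fits inside one epoch of batches.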
# FPHAM Custom Scheduler ==
custom_scheduller = False
lr_scheduler_type_arg = lr_scheduler_type
if lr_scheduler_type == 'FP_low_epoch_annealing':
custom_scheduller = True
lr_scheduler_type_arg = 'cosine'
elif lr_scheduler_type == 'FP_half_time_annealing':
custom_scheduller = True
lr_scheduler_type_arg = 'constant'
elif lr_scheduler_type =='FP_raise_fall_creative':
custom_scheduller = True
lr_scheduler_type_arg = 'constant_with_warmup'
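    # Summary of the mapping above (based on FPSchedulerTrainer.create_scheduler in custom_scheduler.py):
    # the FP_* scheduler names are translated to stock HF scheduler names ('cosine', 'constant',
    # 'constant_with_warmup') only as placeholders for TrainingArguments; when custom_scheduller is True,
    # FPSchedulerTrainer intercepts those names and builds its own warmup / hold / annealing schedule instead.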
#gradient_checkpointing=True
args=transformers.TrainingArguments(
report_to=report_to if report_to != "None" else None,
per_device_train_batch_size=micro_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps),
warmup_ratio = warmup_ratio,
num_train_epochs=epochs,
learning_rate=actual_lr,
fp16=False if shared.args.cpu else True,
optim=optimizer,
logging_steps=1,
evaluation_strategy="steps" if eval_data is not None else "no",
eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
save_strategy="steps" if eval_data is not None else "no",
output_dir=lora_file_path,
lr_scheduler_type=lr_scheduler_type_arg,
load_best_model_at_end=eval_data is not None,
# TODO: Enable multi-device support
ddp_find_unused_parameters=None,
no_cuda=shared.args.cpu,
)
if custom_scheduller:
trainer = FPSchedulerTrainer(
neftune_noise_alpha=neft_noise_alpha,
model=lora_model,
train_dataset=train_data,
eval_dataset=eval_data,
args=args,
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
callbacks=list([Callbacks()])
)
elif neft_noise_alpha > 0:
|
os.environ["WANDB_MODE"] = "offline"
# os.environ["WANDB_DISABLED"] = "true"
## just temporary to avoid warning
if hasattr(torch.utils.checkpoint, 'noop_context_fn'):
def my_checkpoint(
function,
*args,
use_reentrant: Optional[bool] = None,
context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = torch.utils.checkpoint.noop_context_fn,
determinism_check: str = torch.utils.checkpoint._DEFAULT_DETERMINISM_MODE,
debug: bool = False,
**kwargs
):
if use_reentrant is None:
#print ("reentran = NONE")
use_reentrant = True
# Hack to mix *args with **kwargs in a python 2.7-compliant way
preserve = kwargs.pop("preserve_rng_state", True)
if kwargs and use_reentrant:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
if use_reentrant:
if context_fn is not torch.utils.checkpoint.noop_context_fn or debug is not False:
raise ValueError(
"Passing `context_fn` or `debug` is only supported when "
"use_reentrant=False."
)
return torch.utils.checkpoint.CheckpointFunction.apply(function, preserve, *args)
else:
print ("reentran = FALSE")
gen = torch.utils.checkpoint._checkpoint_without_reentrant_generator(
function, preserve, context_fn, determinism_check, debug, *args, **kwargs
)
# Runs pre-forward logic
next(gen)
ret = function(*args, **kwargs)
# Runs post-forward logic
try:
next(gen)
except StopIteration:
return ret
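    # Note: my_checkpoint above mirrors the signature of torch.utils.checkpoint.checkpoint and, when
    # use_reentrant is left unspecified, defaults to the reentrant CheckpointFunction path. As the
    # comment above says, this is a temporary shim to avoid the use_reentrant warning in newer PyTorch.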
params = {
"display_name": "Training PRO",
"is_tab": True
}
non_serialized_params = {
"debug_slicer": False,
"Lora_sortedByTime": False,
"stop_at_loss": 0,
"save_steps_under_loss": 0.0,
"save_checkpoint_now": False,
"training_loop": False,
"current_stability": 0,
"save_epochs": 0,
"checkpoint_offset": 0,
"epoch_offset":0,
}
MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()}
PARAMETERS = ["lora_name", "always_override", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "higher_rank_limit", "warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to", "precize_slicing_overlap", "add_eos_token_type", "save_steps_under_loss", "add_bos_token", "training_projection","sliding_window","warmup_ratio","grad_accumulation","neft_noise_alpha"]
WANT_INTERRUPT = False
train_log = {}
train_template = {}
train_log_graph = []
train_choices = ["all","q-k-v-o","q-k-v","k-v-down","q-v"]
statistics = {
'loss': [],
'lr': [],
}
RED = "\033[91m"
YELLOW = "\033[93m"
GREEN = "\033[92m"
RESET = "\033[0m"
def ui():
with gr.Tab('Train LoRA', elem_id='lora-train-tab'):
tmp = gr.State('')
with gr.Row():
with gr.Column():
# YY.MM.DD
gr.Markdown("`Ver: 23.10.20` This is enhanced version of QLora Training. [Maintained by FP](https://github.com/FartyPants/Training_PRO/tree/main)")
with gr.Row():
with gr.Column(scale=5):
with gr.Row():
copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']), elem_classes=['slim-dropdown'])
create_refresh_button(copy_from, lambda: None, lambda: {'choices': get_available_loras_local(non_serialized_params['Lora_sortedByTime'])}, 'refresh-button')
with gr.Column():
sort_byTime = gr.Checkbox(label='Sort list by Date', value=False, info='Sorts Loras by date created.', elem_classes=['no-background'])
with gr.Row():
with gr.Column(scale=5):
lora_name = gr.Textbox(label='Name', info='The name of your new LoRA file')
with gr.Column():
always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background'])
with gr.Row():
with gr.Column():
lora_rank = gr.Slider(label='LoRA Rank', value=32, minimum=0, maximum=1024, step=4, info='Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.')
lora_alpha = gr.Slider(label='LoRA Alpha', value=64, minimum=0, maximum=2048, step=4, info='This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.')
batch_size = gr.Slider(visible= False, label='Batch Size', value=0, minimum=0, maximum=1024, step=4, info='Now Replaced with Gradient accumulation. Keeping it for sake of old saved data')
                    micro_batch_size = gr.Slider(label='True Batch Size', value=4, minimum=1, maximum=128, step=1, info='Specifies how many text blocks are trained per step. Higher values capture the training concept better, but they require more GPU memory and reduce speed.')
grad_accumulation = gr.Slider(label='Gradient Accumulation Steps', value=1, minimum=1, maximum=256, step=1, info="Virtually multiplies the Batch Size by averaging the learning over more than one step. VRAM friendly. Evens out loss fluctuations but can also degrade training fidelity.")
with gr.Column():
stop_at_loss = gr.Slider(label='Stop at loss (Can be changed during training)', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached.')
gr.Markdown(" ")
epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.')
learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.')
lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt', 'FP_low_epoch_annealing', 'FP_half_time_annealing','FP_raise_fall_creative'], info='Learning rate scheduler - defines how the learning rate changes over time. Custom schedulers: FP_low_epoch_annealing, FP_half_time_annealing, FP_raise_fall_creative (see README)', elem_classes=['slim-dropdown'])
with gr.Accordion(label='Checkpoints', open=True):
with gr.Row():
with gr.Column():
save_steps = gr.Number(label='Save every n steps', value=0, info='A checkpoint will be saved every n steps and at each Epoch boundary. (0 = OFF)')
with gr.Column():
                            save_steps_under_loss = gr.Slider(label='Save at 10% Loss change', value=1.8, minimum=0.0, maximum=3.0, step=0.1, info="Saves checkpoints at (or below) this loss and then each time the loss falls by at least 10%. This works independently of 'Save every n steps'.")
with gr.Row():
save_chackpoint_now = gr.Button('Queue Checkpoint Now')
with gr.Accordion(label='Advanced Options', open=True):
with gr.Row():
with gr.Column():
                            warmup_steps = gr.Number(label='Warmup Steps', value=100, info='Maximum number of steps used for a linear warmup. Reduces early over-fitting to the first training blocks. This value takes precedence over Warmup Ratio and is aligned to the closest multiple of gradient accumulation.')
                            warmup_ratio = gr.Slider(label='Warmup Ratio', minimum=0.0, maximum=0.2, step=0.025, value=0.0, info='Ratio of total training steps that will be used for a linear warmup. It applies only if Warmup Steps is 0.')
neft_noise_alpha = gr.Slider(label='NEFtune noise scale', minimum=0.0, maximum=15, step=1, value=0.0, info='Add noise to the training to improve generalization. [0 - OFF, Starting value to experiment: 5]')
training_projection = gr.Radio(value = train_choices[4], label='LLaMA Target Projections', info='Change the targets (LORA is typically q-v)', choices=train_choices)
lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.05, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.')
optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown'])
with gr.Column():
train_only_after = gr.Textbox(label='Train Only After', value='', info='Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use "### Response:" to only train the response and ignore the input.')
add_bos_token = gr.Checkbox(label='Add BOS token', value=True, info="Adds BOS token for each dataset item")
add_eos_token = gr.Checkbox(label='Add EOS token', value=False, info="Adds EOS token for each dataset item")
add_eos_token_type = gr.Dropdown(label='EOS placement (Text file)', choices=['Every Block', 'Hard Cut Blocks Only'], value='Every Block', info='', allow_custom_value = False)
higher_rank_limit = gr.Checkbox(label='Enable higher ranks', value=False, info='If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.')
report_to = gr.Radio(label="Save detailed logs with", value="None", choices=["None", "wandb", "tensorboard"], interactive=True)
# for future
#with gr.Accordion(label='Dynamic Scheduler', open = False):
# ds_min_epochs = gr.Number(label='Minimum Epochs', value='1', info='Minimum epochs that will be always performed before ramp down can be triggered')
# ds_max_epochs = gr.Number(label='Maximum Epochs (fallback)', value='50', info='Maximum Epochs before the training will bail out completely (should be a large number)')
# ds_loss_trigger = gr.Slider(label='Trigger Loss', minimum=0.0, maximum=2.8, step=0.1, value=1.6, info='Loss at which the ramp down schedule will be triggered')
# ds_loss_rolling_window = gr.Number(label='Loss rolling average', value='4', info='Calculate loss by averaging last x numbers to avoid jumps and noise')
                    # ds_epochs_to_ramp = gr.Slider(label='Ramp down ratio', minimum=0.0, maximum=2.0, step=0.1, value=1.00, info='How long the ramp down will last relative to elapsed steps (before trigger)')
                    # gr.Markdown('These are settings for the FP_dynamic_loss_trigger scheduler. The scheduler will warm up, then hold constant until the loss falls under Trigger Loss, then commence a linear ramp down schedule and stop. The length of the ramp down is set by Ramp down ratio, where (ramp down steps) = ratio * (elapsed steps). (The time to completion shown will be very high until the ramp down is triggered.)')
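                    # Illustrative numbers for the ramp-down formula above (not from the original code):
                    # with Ramp down ratio = 1.0 and Trigger Loss reached after 400 elapsed steps, the schedule
                    # would add roughly 400 ramp-down steps, i.e. about 800 steps in total before stopping.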
with gr.Column():
with gr.Tab(label='Formatted Dataset'):
with gr.Row():
with gr.Column():
with gr.Row():
dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
with gr.Row():
eval_dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
with gr.Column():
with gr.Row():
format = gr.Dropdown(choices=get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('training/formats', 'json')}, 'refresh-button')
with gr.Row():
eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
with gr.Tab(label="Text file"):
with gr.Row():
raw_text_file = gr.Dropdown(choices=get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'txt')}, 'refresh-button')
with gr.Row():
with gr.Column():
precize_slicing_overlap = gr.Checkbox(label='Add Overlapping blocks', value = True)
                            sliding_window = gr.Checkbox(label='DEMENTOR Long-form Learning by FP (Highly Experimental, use low epochs)', value = False, info='Deep Memorization Enforcement Through Overlapping and Repetition. (I named it, so shush). Special process for learning long-form text using a low number of epochs.')
#debug_slicer = gr.Checkbox(label='Dump sentencelist.json to logs', value = non_serialized_params['debug_slicer'], info='Debug Slicer')
with gr.Column():
hard_cut_string = gr.Textbox(label='Hard Cut String', value='\\n\\n\\n', info='String that indicates a cut between logical blocks of text (ex. Ideas or Chapters). Helps prevent unwanted overlap between unrelated ideas.')
                            min_chars = gr.Number(label='Ignore small blocks', value=0, info='Ignore text blocks that have this many characters or fewer.')
with gr.Tab(label="URL"):
with gr.Row():
with gr.Column():
                            download_file_url = gr.Textbox(label='Download JSON or txt file to datasets (or formats) folder', value='',info='The URL of a file to download. If on GitHub, make sure you get the URL of the raw file (https://raw.githubusercontent.com/...). If on Hugging Face, make sure the URL has /resolve/ in it, not /blob/')
with gr.Row():
                                download_check_overwrite = gr.Checkbox(label='Overwrite', value=False, info='Overwrite if the file already exists')
download_folder = gr.Radio(label="Destination", value='training/datasets', choices=['training/datasets', 'training/formats'], interactive=True)
download_button = gr.Button('Download')
download_status = gr.Textbox(label='Download Status', value='', interactive=False)
with gr.Row():
with gr.Column():
with gr.Row():
cutoff_len = gr.Slider(label='Chunk Length (Cutoff Length)', minimum=32, maximum=2048, value=256, step=32, info='The maximum length of a chunk (in tokens). Applies to both JSON dataset and text files. Higher values require much more VRAM.')
with gr.Row():
with gr.Column():
check_dataset_btn = gr.Button('Verify Dataset/Text File and suggest data entries')
check_dataset_txt = gr.Textbox(label='Dataset info', value='')
with gr.Row():
start_button = gr.Button("Start LoRA Training", variant='primary')
stop_button = gr.Button("Interrupt")
with gr.Accordion(label="Graph", open=True):
with gr.Row():
# show_actions_button = False - we use old gradio
plot_graph = gr.LinePlot(x="epoch", y="value", title="Loss Metrics", overlay_point=True, tooltip=["epoch", "value"], x_lim=[0, 1], y_lim=[0, 3.5], width=500, height=250)
output = gr.Markdown(value="Ready")
with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'):
with gr.Row():
with gr.Column():
models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True)
evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
with gr.Row():
with gr.Column():
stride_length = gr.Slider(label='Stride', minimum=1, maximum=2048, value=512, step=1, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
with gr.Column():
max_length = gr.Slider(label='max_length', minimum=0, maximum=8096, value=0, step=1, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.')
with gr.Row():
start_current_evaluation = gr.Button("Evaluate loaded model")
start_evaluation = gr.Button("Evaluate selected models")
stop_evaluation = gr.Button("Interrupt")
with gr.Column():
evaluation_log = gr.Markdown(value='')
evaluation_table = gr.Dataframe(value=generate_markdown_table(), interactive=True)
with gr.Row():
save_comments = gr.Button('Save comments', elem_classes="small-button")
refresh_table = gr.Button('Refresh the table', elem_classes="small-button")
# Training events
all_params = [lora_name, always_override, save_steps, micro_batch_size, batch_size, epochs, learning_rate, lr_scheduler_type, lora_rank, lora_alpha, lora_dropout, cutoff_len, dataset, eval_dataset, format, eval_steps, raw_text_file, higher_rank_limit, warmup_steps, optimizer, hard_cut_string, train_only_after, stop_at_loss, add_eos_token, min_chars, report_to, precize_slicing_overlap, add_eos_token_type, save_steps_under_loss, add_bos_token, training_projection,sliding_window,warmup_ratio,grad_accumulation, neft_noise_alpha]
def fix_old_version(batch_size_val,micro_batch_size_val, grad_accumulation_val):
if batch_size_val>0:
gradient_acc = batch_size_val // micro_batch_size_val
print(f"Using Old version of Batch Size ({batch_size_val}) to set Gradient Accumulation: {gradient_acc}")
return gradient_acc
return grad_accumulation_val
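        # Example of the legacy conversion above (illustrative numbers): an old config saved with
        # batch_size = 128 and micro_batch_size = 4 maps to Gradient Accumulation = 128 // 4 = 32,
        # so older training_parameters.json files keep their effective batch size.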
copy_from.change(partial(do_copy_params, all_params= all_params), copy_from, all_params).then(fix_old_version,[batch_size,micro_batch_size, grad_accumulation],grad_accumulation)
start_button.click(do_train, all_params, [output,plot_graph])
stop_button.click(do_interrupt, None, None, queue=False)
higher_rank_limit.change(change_rank_limit, [higher_rank_limit], [lora_rank, lora_alpha])
def trigger_stop_at_loss(stop_at_loss_value):
non_serialized_params.update({"stop_at_loss": stop_at_loss_value})
if non_serialized_params['training_loop']:
print(f"Queue: [Stop at loss Change] to {stop_at_loss_value}")
stop_at_loss.change(trigger_stop_at_loss, stop_at_loss, None)
def trigger_save_checkpoint():
non_serialized_params.update({"save_checkpoint_now": True})
if non_serialized_params['training_loop']:
print("Queue: [Save checkpoint] Checkpoint will be saved after the current step is finished.")
else:
print("Use during the training to save the checkpoint at any time.")
def update_button():
return gr.Button.update('[Checkpoint in Queue]', variant='stop', interactive=True)
def update_button2():
time.sleep(1.0)
return gr.Button.update('Queue Checkpoint Now', variant='secondary',interactive = True)
save_chackpoint_now.click(trigger_save_checkpoint, None, None).then(update_button, None,save_chackpoint_now).then(update_button2, None,save_chackpoint_now)
dataset_calc_params = [save_steps,micro_batch_size, epochs, cutoff_len, dataset, format, raw_text_file, warmup_steps, hard_cut_string, min_chars, precize_slicing_overlap,sliding_window,warmup_ratio,grad_accumulation]
def check_dataset(save_steps:int, micro_batch_size: int, epochs: int, cutoff_len: int, dataset:str, format:str, raw_text_file:str, warmup_steps:int, hard_cut_string:str, min_chars:int, precize_slicing_overlap:bool,sliding_window:bool,warmup_ratio:float,grad_accumulation:int):
result = "Specify JSON dastaset or Text file"
total_blocks = 0
if shared.tokenizer is None:
yield "Tokenizer is not available. Please Load some Model first."
return
if raw_text_file not in ['None', '']:
logger.info("Loading Text file...")
fullpath = clean_path('training/datasets', f'{raw_text_file}')
fullpath = Path(fullpath)
if fullpath.is_dir():
logger.info('Training path directory {}'.format(raw_text_file))
raw_text = ""
file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name))
for file_path in file_paths:
if file_path.is_file():
with file_path.open('r', encoding='utf-8') as file:
raw_text += file.read().replace('\r', '')
logger.info(f"Loaded training file: {file_path.name}")
else:
try:
with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
raw_text = file.read().replace('\r', '')
except:
yield f"{raw_text_file}.txt doesn't seem to exsist anymore... check your training/datasets folder"
return
if min_chars<0:
min_chars = 0
# == New more precise slicing on sentence boundary ==
if sliding_window:
text_chunks = sliding_block_cut(raw_text, min_chars, False, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
else:
text_chunks = precise_cut(raw_text, precize_slicing_overlap, min_chars, False, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
total_blocks = len(text_chunks)
result = f"Text: ({raw_text_file}.txt) has {total_blocks} blocks (Block Size {cutoff_len} tokens)"
del text_chunks
else:
if dataset in ['None', '']:
yield "Select dataset or text file."
return
if format in ['None', '']:
yield "Select format choice for dataset."
return
with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
format_data: dict[str, str] = json.load(formatFile)
def generate_prompt(data_point: dict[str, str]):
for options, data in format_data.items():
if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)):
for key, val in data_point.items():
if type(val) is str:
data = data.replace(f'%{key}%', val)
return data
raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')
def tokenize_dummy(prompt):
input_ids = shared.tokenizer.encode(prompt, truncation=True, max_length=cutoff_len)
labels = [1] * len(input_ids)
input_ids = torch.tensor(input_ids)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
}
def generate_and_tokenize_prompt(data_point):
prompt = generate_prompt(data_point)
return tokenize_dummy(prompt)
logger.info("Loading JSON datasets...")
data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
data_keys = []
if data:
if 'train' in data: # Check if the 'train' split exists in the dataset
data_keys = list(data['train'][0].keys())
print("Data Keys:", data_keys)
else:
print("The dataset is empty.")
train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
total_blocks = train_data.num_rows
result = f"Dataset: ({dataset}.json) has {total_blocks} blocks @ length = {cutoff_len} tokens\n(Keys: {data_keys} - Format: {format}.json): "
#for options, data in format_data.items():
# format_keys = options.split(',')
# result += f"{format_keys}, "
#result = result.rstrip()
#result = result.rstrip(',')
if total_blocks>0:
number_ofSteps = int(math.ceil(total_blocks / micro_batch_size) * epochs)
num_stepsPer_epoch = int(math.ceil(number_ofSteps/epochs))
min_warm = math.ceil(100 / grad_accumulation)
warmup_steps_suggest = min(int(min_warm*grad_accumulation), int(math.ceil(number_ofSteps * 0.1)))
warmup_steps_suggest = min(warmup_steps_suggest,num_stepsPer_epoch)
save_each_n_min = int(math.ceil(number_ofSteps/10))
save_each_n_max = int(math.ceil(number_ofSteps/5))
gradient_accumulation_max = int(total_blocks)//micro_batch_size
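                # Worked example of the suggestions below (illustrative numbers, not from the original code):
                # 300 blocks, micro_batch_size = 4, epochs = 3, grad_accumulation = 1 gives
                # number_ofSteps = ceil(300 / 4) * 3 = 225, num_stepsPer_epoch = 75,
                # warmup_steps_suggest = min(min(100, ceil(225 * 0.1)), 75) = 23,
                # checkpoint suggestion = every 23 - 45 steps, gradient_accumulation_max = 300 // 4 = 75.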
result += f"\n[Batch Size: {micro_batch_size}, Epochs: {epochs}, Gradient Accumulation: {grad_accumulation}]\n"
result += f"Total number of steps: {number_ofSteps}\n"
result += f"Steps per each Epoch: {num_stepsPer_epoch}\n"
result += f"Suggestions:\n"
result += f"Checkpoints: Save every {save_each_n_min} - {save_each_n_max} steps (Current: {int(save_steps)})\n"
result += f"Warmup steps: {warmup_steps_suggest} (Current: {int(warmup_steps)})"
if gradient_accumulation_max < grad_accumulation:
result += f"\n\nWARNING: Gradient Accumulation {grad_accumulation} is too high: It should be below {gradient_accumulation_max}"
yield result
return
check_dataset_btn.click(check_dataset, dataset_calc_params ,check_dataset_txt)
# Evaluation events. For some reason, the interrupt event
# doesn't work with the .then() syntax, so I write them one
# by one in this ugly but functional way.
ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
start_current_evaluation.click(lambda: ['current model'], None, tmp)
ev_cur = start_current_evaluation.click(calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_current_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)
stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False)
refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True)
save_comments.click(
save_past_evaluations, evaluation_table, None).then(
lambda: "Comments saved.", None, evaluation_log, show_progress=False)
def reload_lora():
return gr.Dropdown.update(choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']))
# nonserialized items
sort_byTime.change(lambda x: non_serialized_params.update({"Lora_sortedByTime": x}), sort_byTime, None).then(reload_lora,None,copy_from)
#debug_slicer.change(lambda x: non_serialized_params.update({"debug_slicer": x}), debug_slicer, None)
def update_dataset():
return gr.update(choices=get_datasets('training/datasets', 'json')), gr.update(choices=get_datasets('training/datasets', 'txt'))
download_button.click(download_file_from_url, [download_file_url, download_check_overwrite, download_folder], download_status).then(update_dataset, None, [dataset, raw_text_file])
def get_datasets(path: str, ext: str):
# include subdirectories for raw txt files to allow training from a subdirectory of txt files
#if ext == "txt":
# return ['None'] + sorted(set([k.stem for k in list(Path(path).glob('txt')) + list(Path(path).glob('*/')) if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
def do_interrupt():
global WANT_INTERRUPT
WANT_INTERRUPT = True
def do_copy_params(lora_name: str, all_params):
if lora_name:
f_name = f"{shared.args.lora_dir}/{clean_path(None, lora_name)}/training_parameters.json"
if Path(f_name).is_file():
with open(f_name, 'r', encoding='utf-8') as format_file:
params: dict[str, str] = json.load(format_file)
else:
params = {}
else:
params = {}
result = list()
for i in range(0, len(PARAMETERS)):
key = PARAMETERS[i]
if key in params:
result.append(params[key])
else:
result.append(all_params[i])
return result
def change_rank_limit(use_higher_ranks: bool):
mult = 2 if use_higher_ranks else 1
return {"maximum": 1024 * mult, "__type__": "update"}, {"maximum": 2048 * mult, "__type__": "update"}
def clean_path(base_path: str, path: str):
"""Strips unusual symbols and forcibly builds a path as relative to the intended directory."""
path = path.replace('\\', '/').replace('..', '_')
if base_path is None:
return path
return f'{Path(base_path).absolute()}/{path}'
def backup_adapter(input_folder):
# Get the creation date of the file adapter_model.bin
try:
adapter_file = Path(f"{input_folder}/adapter_model.bin")
if adapter_file.is_file():
logger.info("Backing up existing LoRA adapter...")
creation_date = datetime.fromtimestamp(adapter_file.stat().st_ctime)
creation_date_str = creation_date.strftime("Backup-%Y-%m-%d")
# Create the new subfolder
subfolder_path = Path(f"{input_folder}/{creation_date_str}")
subfolder_path.mkdir(parents=True, exist_ok=True)
# Check if the file already exists in the subfolder
backup_adapter_file = Path(f"{input_folder}/{creation_date_str}/adapter_model.bin")
if backup_adapter_file.is_file():
print(" - Backup already exists. Skipping backup process.")
return
# Copy existing files to the new subfolder
existing_files = Path(input_folder).iterdir()
for file in existing_files:
if file.is_file():
shutil.copy2(file, subfolder_path)
except Exception as e:
print("An error occurred in backup_adapter:", str(e))
def calc_trainable_parameters(model):
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
num_params = param.numel()
# if using DS Zero 3 and the weights are initialized empty
if num_params == 0 and hasattr(param, "ds_numel"):
num_params = param.ds_numel
all_param += num_params
if param.requires_grad:
trainable_params += num_params
return trainable_params, all_param
def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str, precize_slicing_overlap: bool, add_eos_token_type: str, save_steps_under_loss: float, add_bos_token: bool, training_projection: str, sliding_window: bool, warmup_ratio: float, grad_accumulation: int, neft_noise_alpha: float):
if shared.args.monkey_patch:
replace_peft_model_with_int4_lora_model()
global train_log_graph
global WANT_INTERRUPT
WANT_INTERRUPT = False
statistics['loss'] = []
statistics['loss'].append({'epoch': 0, 'value': 0})
zero_pd = pd.DataFrame(statistics['loss'])
# == Input validation / processing ==
yield "Preparing the input...", zero_pd
lora_file_path = clean_path(None, lora_name)
if lora_file_path.strip() == '':
yield "Missing or invalid LoRA file name input.", zero_pd
return
lora_file_path = f"{Path(shared.args.lora_dir)}/{lora_file_path}"
actual_lr = float(learning_rate)
model_type = type(shared.model).__name__
if model_type in MODEL_CLASSES:
model_id = MODEL_CLASSES[model_type]
else:
model_id = "llama"
if model_type == "PeftModelForCausalLM":
if len(shared.lora_names) > 0:
yield "You are trying to train a LoRA while you already have another LoRA loaded. This will work, but may have unexpected effects. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*", zero_pd
logger.warning("Training LoRA over top of another LoRA. May have unexpected effects.")
else:
yield "Model ID not matched due to LoRA loading. Consider reloading base model. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*", zero_pd
logger.warning("Model ID not matched due to LoRA loading. Consider reloading base model.")
else:
yield "LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. Unexpected errors may follow. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*", zero_pd
logger.warning(f"LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. (Found model type: {model_type})")
time.sleep(5)
if shared.args.loader == 'GPTQ-for-LLaMa' and not shared.args.monkey_patch:
yield "LoRA training with GPTQ-for-LLaMa requires loading with `--monkey-patch`", zero_pd
return
if cutoff_len <= 0 or micro_batch_size <= 0 or actual_lr <= 0 or lora_rank <= 0 or lora_alpha <= 0:
yield "Cannot input zeroes.", zero_pd
return
#in the new version this was dropped in favor of grad_accumulation
#set it to zero for the new save
batch_size = 0
gradient_accumulation_steps = grad_accumulation #batch_size // micro_batch_size
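# pad with token id 0 and pad on the left so each block ends with the real text (see tokenize below)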
shared.tokenizer.pad_token_id = 0
shared.tokenizer.padding_side = "left"
def encode(text, prepend_bos_token):
result = shared.tokenizer.encode(text, truncation=True, max_length=cutoff_len)
# Check if the first two tokens are BOS
if len(result) >= 2 and result[:2] == [shared.tokenizer.bos_token_id, shared.tokenizer.bos_token_id]:
result = result[1:]
if not prepend_bos_token and result[0] == shared.tokenizer.bos_token_id:
result = result[1:]
return result
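# tokenize() pads/truncates every prompt to cutoff_len; when train_only_after is set, tokens before the marker
# get label -100 (the conventional ignore index) so that only the completion part is meant to contribute to the loss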
def tokenize(prompt, append_eos_token=False, prepend_bos_token = False):
if train_only_after == '' or train_only_after not in prompt:
input_ids = encode(prompt, prepend_bos_token)
if append_eos_token and input_ids[-1] != shared.tokenizer.eos_token_id and len(input_ids) < cutoff_len:
input_ids.append(shared.tokenizer.eos_token_id)
input_ids = [shared.tokenizer.pad_token_id] * (cutoff_len - len(input_ids)) + input_ids
labels = [1] * len(input_ids)
else:
ind = prompt.index(train_only_after) + len(train_only_after)
before_tokens = encode(prompt[:ind], prepend_bos_token)
after_tokens = encode(prompt[ind:], False)
if append_eos_token and after_tokens[-1] != shared.tokenizer.eos_token_id:
after_tokens.append(shared.tokenizer.eos_token_id)
full_length = len(after_tokens) + len(before_tokens)
if full_length > cutoff_len:
after_tokens = after_tokens[:cutoff_len - len(before_tokens)]
else:
before_tokens = [shared.tokenizer.pad_token_id] * (cutoff_len - full_length) + before_tokens
input_ids = before_tokens + after_tokens
labels = [-100] * len(before_tokens) + [1] * len(after_tokens)
input_ids = torch.tensor(input_ids)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
}
train_template.clear()
#reset stuff
print(f"*** LoRA: {lora_name} ***")
non_serialized_params.update({"stop_at_loss": stop_at_loss})
non_serialized_params.update({"save_steps_under_loss": save_steps_under_loss+0.01})
non_serialized_params.update({"save_checkpoint_now": False})
non_serialized_params.update({"training_loop": False})
non_serialized_params.update({"current_stability": 0})
non_serialized_params.update({"save_epochs": 0})
non_serialized_params.update({"checkpoint_offset": 0})
non_serialized_params.update({"epoch_offset": 0})
train_log_graph.clear()
# === once fixed, this can be removed ==============================
if hasattr(torch.utils.checkpoint, 'noop_context_fn'):
print("Testing Pytorch...")
old_checkpoint_signature = inspect.signature(torch.utils.checkpoint.checkpoint)
# Get the signature of your new checkpoint function
my_checkpoint_signature = inspect.signature(my_checkpoint)
# Check if the signatures match
if old_checkpoint_signature.parameters == my_checkpoint_signature.parameters:
print(F"{RED}Overriding Torch checkpoint function to avoid repeated 'use_reentrant not explicitly set' warnings{RESET}")
#print(" - Note: Transformers need to pass use_reentrant in llama.modeling_llama in def forward, layer_outputs = torch.utils.checkpoint.checkpoint")
#print(" Once they do, this function can be removed")
torch.utils.checkpoint.checkpoint = my_checkpoint
# END OF FPHAM SENTENCE SPLIT functions ===================
# == Prep the dataset, format, etc ==
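# two input paths: a raw text file is sliced into cutoff_len blocks on sentence boundaries,
# while a JSON dataset is rendered through the selected format template and then tokenized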
if raw_text_file not in ['None', '']:
train_template["template_type"] = "raw_text"
logger.info("Loading text file...")
fullpath = clean_path('training/datasets', f'{raw_text_file}')
fullpath = Path(fullpath)
if fullpath.is_dir():
logger.info('Training from text directory: {}'.format(raw_text_file))
raw_text = ""
file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name))
for file_path in file_paths:
if file_path.is_file():
with file_path.open('r', encoding='utf-8') as file:
raw_text += file.read().replace('\r', '')
logger.info(f"Loaded training file: {file_path.name}")
else:
with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
raw_text = file.read().replace('\r', '')
# FPHAM PRECISE SLICING
if min_chars<0:
min_chars = 0
add_EOS_to_all = add_eos_token and add_eos_token_type == 'Every Block'
add_EOS_to_HC = add_eos_token and add_eos_token_type != 'Every Block'
#print (f"add_eos_token {add_eos_token}, add_EOS_to_all {add_EOS_to_all}, add_EOS_to_HC {add_EOS_to_HC}")
# == New more precise slicing on sentence boundary ==
if sliding_window:
text_chunks = sliding_block_cut(raw_text, min_chars, add_EOS_to_HC, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
else:
text_chunks = precise_cut(raw_text, precize_slicing_overlap, min_chars, add_EOS_to_HC, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
train_data = Dataset.from_list([tokenize(x, add_EOS_to_all, add_bos_token) for x in text_chunks])
if add_EOS_to_all:
print(f"Added EOS to {len(text_chunks)} blocks")
print(f"All Data Blocks: {len(text_chunks)}")
del text_chunks
eval_data = None
else:
if dataset in ['None', '']:
yield "Missing dataset choice input, cannot continue.", zero_pd
return
if format in ['None', '']:
yield "Missing format choice input, cannot continue.", zero_pd
return
train_template["template_type"] = "dataset"
with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
format_data: dict[str, str] = json.load(formatFile)
# == store training prompt ==
for _, value in format_data.items():
prompt_key = f"template_{len(train_template)}"
train_template[prompt_key] = value
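# generate_prompt picks the template whose comma-separated key list matches the set of non-empty string fields
# in the record, then substitutes each %key% placeholder with the corresponding value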
def generate_prompt(data_point: dict[str, str]):
for options, data in format_data.items():
if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)):
for key, val in data_point.items():
if type(val) is str:
data = data.replace(f'%{key}%', val)
return data
raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')
def generate_and_tokenize_prompt(data_point):
prompt = generate_prompt(data_point)
return tokenize(prompt, add_eos_token, add_bos_token)
logger.info("Loading JSON datasets...")
data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
print(f"BOS: {add_bos_token} EOS: {add_eos_token}")
print(f"Data Blocks: {train_data.num_rows}")
if eval_dataset == 'None':
eval_data = None
else:
eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
# == We MUST reload model if it went through any previous training, even failed one ==
if shared.model_dirty_from_training:
selected_model = shared.model_name
if selected_model:
print("\033[1;31;1m(Model has been modified by previous training, it needs to be reloaded...)\033[0;37;0m")
try:
yield f"Reloading {selected_model}...", zero_pd
reload_model()
shared.tokenizer.pad_token_id = 0
shared.tokenizer.padding_side = "left"
if shared.model is not None:
print("Model reloaded OK, continue with training.")
else:
return f"Failed to load {selected_model}."
except:
exc = traceback.format_exc()
logger.error('Failed to reload the model.')
print(exc)
return exc.replace('\n', '\n\n')
# == Start prepping the model itself ==
if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'):
logger.info("Getting model ready...")
# gradient checkpointing could be disabled here; prepare_model_for_kbit_training defaults to use_gradient_checkpointing=True
prepare_model_for_kbit_training(shared.model)
# base model is now frozen and should not be reused for any other LoRA training than this one
shared.model_dirty_from_training = True
print(f"Transformers Model Type: {YELLOW}{model_type}{RESET}")
if training_projection==train_choices[0]:
model_to_lora_modules[model_id] = ["gate_proj","down_proj","up_proj","q_proj","k_proj","v_proj","o_proj"]
elif training_projection==train_choices[1]:
model_to_lora_modules[model_id] = ["q_proj","k_proj", "v_proj", "o_proj"]
elif training_projection==train_choices[2]:
model_to_lora_modules[model_id] = ["q_proj","k_proj", "v_proj"]
elif training_projection==train_choices[3]:
model_to_lora_modules[model_id] = ["k_proj", "v_proj", "down_proj"]
else:
model_to_lora_modules[model_id] = ["q_proj", "v_proj"]
logger.info("Preparing for training...")
config = LoraConfig(
r=lora_rank,
lora_alpha=lora_alpha,
target_modules=model_to_lora_modules[model_id],
lora_dropout=lora_dropout,
bias="none",
task_type="CAUSAL_LM"
)
# == Backup the existing adapter ==
if not always_override:
backup_adapter(lora_file_path)
# == get model trainable params
model_trainable_params, model_all_params = calc_trainable_parameters(shared.model)
try:
logger.info("Creating LoRA model...")
lora_model = get_peft_model(shared.model, config)
if not always_override and Path(f"{lora_file_path}/adapter_model.bin").is_file():
logger.info("Loading existing LoRA data...")
state_dict_peft = torch.load(f"{lora_file_path}/adapter_model.bin")
set_peft_model_state_dict(lora_model, state_dict_peft)
print(f" + Continue Training on {RED}{lora_file_path}/adapter_model.bin{RESET}")
#load training_log.json if it exists
if Path(f"{lora_file_path}/training_log.json").is_file():
with open(f"{lora_file_path}/training_log.json", 'r') as json_file:
json_ilog = json.load(json_file)
for key, value in json_ilog.items():
if key=='current_steps':
non_serialized_params.update({"checkpoint_offset": int(value+1)})
print(f" + Checkpoints will be saved with offset: {RED}{non_serialized_params['checkpoint_offset']}{RESET}")
if key=='epoch':
non_serialized_params.update({"epoch_offset": value})
print(f" + Epoch offset: {RED}{non_serialized_params['epoch_offset']}{RESET}")
if Path(f"{lora_file_path}/training_graph.json").is_file():
try:
with open(f"{lora_file_path}/training_graph.json", 'r') as json_file:
train_log_graph = json.load(json_file)
print(" + Training Graph loaded")
except:
print(f"Can't read training_graph")
except:
yield traceback.format_exc().replace('\n', '\n\n'), zero_pd
return
if shared.args.monkey_patch:
for _, m in lora_model.named_modules():
if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
if m.is_v1_model:
m.zeros = m.zeros.half()
m.scales = m.scales.half()
class Tracked():
def __init__(self):
self.current_steps = 0
self.max_steps = 0
self.did_save = False
tracked = Tracked()
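# the UI counts micro-batch steps while the Trainer's global_step counts optimizer steps,
# so the save interval is divided (and reported progress multiplied) by the gradient accumulation factor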
actual_save_steps = math.ceil(save_steps / gradient_accumulation_steps)
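# Trainer callbacks: mirror progress in UI-step units, honour interrupt requests, and force checkpoint saves
# on manual trigger, sustained low loss, completed epochs, or every actual_save_steps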
class Callbacks(transformers.TrainerCallback):
def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
tracked.current_steps = state.global_step * gradient_accumulation_steps
tracked.max_steps = state.max_steps * gradient_accumulation_steps
ssteps10 = int(max(2,(state.max_steps/epochs)*0.1))
if WANT_INTERRUPT:
control.should_epoch_stop = True
control.should_training_stop = True
else:
current_loss = float(train_log.get('loss', 0.0))
current_epoch_int = int(float(train_log.get('epoch', 0.0)))
force_save = False
current_steps_offset = tracked.current_steps + non_serialized_params['checkpoint_offset']
folder_save = f"checkpoint-{current_steps_offset}"
# save if triggered by user
if non_serialized_params['save_checkpoint_now']:
force_save = True
non_serialized_params.update({"save_checkpoint_now": False})
print(f"\033[1;31;1mSave Checkpoint manually trigerred.\033[0;37;0m")
folder_save = f"checkpoint-{current_steps_offset}-user"
patience = 3 # Set the number of consecutive steps for tracking stability
if gradient_accumulation_steps==1:
patience = 4
min_steps = ssteps10
# Save each time the loss is below the threshold
if current_loss < non_serialized_params['save_steps_under_loss'] and current_loss > 0 and state.global_step > min_steps:
current_stability = non_serialized_params['current_stability']
current_stability += 1
non_serialized_params.update({"current_stability": current_stability})
if current_stability >= patience:
current_stability = 0
non_serialized_params.update({"current_stability": current_stability})
current_loss_dec = round(current_loss, 2)
loss_str = f"{current_loss_dec:.2f}"
loss_str = loss_str.replace('.', '_')
new_save = (current_loss_dec-0.1) + 0.01
non_serialized_params.update({"save_steps_under_loss": new_save})
folder_save = f"checkpoint-{current_steps_offset}-loss-{loss_str}"
force_save = True
else:
# Reset stability if the loss goes above the threshold
non_serialized_params.update({"current_stability": 0})
# Save full epochs
if actual_save_steps>0 and current_epoch_int > non_serialized_params['save_epochs'] and state.global_step > min_steps:
current_epoch_offset = current_epoch_int
if non_serialized_params['epoch_offset'] > 0:
current_epoch_offset = current_epoch_int + round(non_serialized_params['epoch_offset'], 2)
ep_off_str = f"{current_epoch_offset}"
ep_off_str = ep_off_str.replace('.', '_')
folder_save = f"checkpoint-{current_steps_offset}-epoch-{ep_off_str}"
non_serialized_params.update({"save_epochs": current_epoch_int})
force_save = True
# save each actual_save_steps
if state.global_step > 0 and actual_save_steps > 0 and state.global_step % actual_save_steps == 0:
folder_save = f"checkpoint-{current_steps_offset}"
force_save = True
if force_save:
lora_model.save_pretrained(f"{lora_file_path}/{folder_save}/")
print(f"\033[1;30;40mStep: {tracked.current_steps:6} \033[0;37;0m Saved: [{folder_save}]")
# Save log
with open(f"{lora_file_path}/{folder_save}/training_log.json", 'w', encoding='utf-8') as file:
json.dump(train_log, file, indent=2)
# == Save training prompt ==
with open(f"{lora_file_path}/{folder_save}/training_prompt.json", 'w', encoding='utf-8') as file:
json.dump(train_template, file, indent=2)
def on_substep_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
tracked.current_steps += 1
if WANT_INTERRUPT:
control.should_epoch_stop = True
control.should_training_stop = True
def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, logs, **kwargs):
train_log.update(logs)
current_steps_offset = tracked.current_steps + non_serialized_params['checkpoint_offset']
current_epoch_offset = train_log.get('epoch', 0.0) + non_serialized_params['epoch_offset']
train_log.update({"current_steps": tracked.current_steps})
train_log.update({"current_steps_adjusted": current_steps_offset})
train_log.update({"epoch_adjusted": current_epoch_offset})
if WANT_INTERRUPT:
print("\033[1;31;1mInterrupted by user\033[0;37;0m")
if non_serialized_params['checkpoint_offset']>0:
print(f"\033[1;30;40mStep: {tracked.current_steps:6} [+{non_serialized_params['checkpoint_offset']}] \033[0;37;0m", end='')
else:
print(f"\033[1;30;40mStep: {tracked.current_steps:6} \033[0;37;0m", end='')
graphentry = {
'current_steps': int(train_log.get('current_steps_adjusted',0)),
'loss': float(train_log.get('loss', 0.0)),
'learning_rate': float(train_log.get('learning_rate', 0.0)),
'epoch': float(train_log.get('epoch_adjusted', 0.0))
}
cur_loss = float(train_log.get('loss', 0.0))
cur_lr = float(train_log.get('learning_rate', 0.0))
cur_epoch = float(train_log.get('epoch', 0.0))
if len(statistics['loss']) == 1:
first_epoch = statistics['loss'][0]['epoch']
first_value = statistics['loss'][0]['value']
if first_value ==0:
statistics['loss'] = []
statistics['loss'].append({'epoch': cur_epoch, 'value': cur_loss})
statistics['lr'].append({'epoch': cur_epoch, 'value': cur_lr})
# Add the entry to the continuous log
train_log_graph.append(graphentry)
# Save the graph log for now, we can later generate full graph
with open(f"{lora_file_path}/training_graph.json", 'w') as file:
json.dump(train_log_graph, file, indent=4)
if 'loss' in logs:
loss = float(logs['loss'])
if loss <= stop_at_loss:
control.should_epoch_stop = True
control.should_training_stop = True
print(f"{RED}Stop Loss {stop_at_loss} reached.{RESET}")
# FPHAM SAMPLE REQ Transformers error handling
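# one optimizer step consumes micro_batch_size * gradient_accumulation_steps blocks, so accumulation above
# num_rows // micro_batch_size leaves too little data per step (the warning below notes this can crash Accelerate/Transformers)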
gradient_accumulation_max = int(train_data.num_rows)//micro_batch_size
if gradient_accumulation_max < gradient_accumulation_steps:
print(f"{RED}WARNING:{RESET} Current gradient accumulation is {RED}too high{RESET} for the amount of training data.")
print(f"Gradient accumulation: {gradient_accumulation_steps} should be less than: {gradient_accumulation_max}. {RED}This could crash Accelerate/Transformers{RESET}")
#min_batchSize = sample_req*micro_batch_size
print(f"Preferable fix: {RED}Increase the size of dataset{RESET}")
print(f"... or Decrerase Gradient Accumulation {RED}{gradient_accumulation_steps}{RESET} to below {GREEN}{gradient_accumulation_max}{RESET}")
gradient_accumulation_steps = max(1,gradient_accumulation_max-1)
print(f"Last resort fix for this run: Lowering Gradient accumulation to {GREEN}{gradient_accumulation_steps}{RESET} [Good luck]")
else:
print(f"Data Size Check: Gradient accumulation: {YELLOW}{gradient_accumulation_steps}{RESET} <= Blocks/Batch {gradient_accumulation_max} ... {GREEN}[OK]{RESET}")
#END OF FPHAM SAMPLE REQ
# FPHAM Custom Scheduler ==
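# the FP_* options are served by the custom FPSchedulerTrainer below; here each one is mapped
# to a stock scheduler name that TrainingArguments accepts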
custom_scheduller = False
lr_scheduler_type_arg = lr_scheduler_type
if lr_scheduler_type == 'FP_low_epoch_annealing':
custom_scheduller = True
lr_scheduler_type_arg = 'cosine'
elif lr_scheduler_type == 'FP_half_time_annealing':
custom_scheduller = True
lr_scheduler_type_arg = 'constant'
elif lr_scheduler_type =='FP_raise_fall_creative':
custom_scheduller = True
lr_scheduler_type_arg = 'constant_with_warmup'
#gradient_checkpointing=True
args=transformers.TrainingArguments(
report_to=report_to if report_to != "None" else None,
per_device_train_batch_size=micro_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps),
warmup_ratio = warmup_ratio,
num_train_epochs=epochs,
learning_rate=actual_lr,
fp16=False if shared.args.cpu else True,
optim=optimizer,
logging_steps=1,
evaluation_strategy="steps" if eval_data is not None else "no",
eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
save_strategy="steps" if eval_data is not None else "no",
output_dir=lora_file_path,
lr_scheduler_type=lr_scheduler_type_arg,
load_best_model_at_end=eval_data is not None,
# TODO: Enable multi-device support
ddp_find_unused_parameters=None,
no_cuda=shared.args.cpu,
)
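# choose the trainer implementation: FPSchedulerTrainer for the custom FP_* schedules,
# FPNEFtuneTrainer when a NEFTune noise alpha is set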
if custom_scheduller:
trainer = FPSchedulerTrainer(
neftune_noise_alpha=neft_noise_alpha,
model=lora_model,
train_dataset=train_data,
eval_dataset=eval_data,
args=args,
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
callbacks=list([Callbacks()])
)
elif neft_noise_alpha > 0: | trainer = FPNEFtuneTrainer( | 1 | 2023-12-20 14:13:38+00:00 | 12k |
foocker/Bert-VITS2-Faster | text/chinese.py | [
{
"identifier": "punctuation",
"path": "text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n \"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n 
\"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 
一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words should be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals"
}
] | import os
import re
import cn2an
import sys
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from text.symbols import punctuation
from text.tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,688 |
sys.path.insert(0,"/data/stable-diffusion-tritonserver/Bert-VITS2")
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub(
|
sys.path.insert(0,"/data/stable-diffusion-tritonserver/Bert-VITS2")
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub( | r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text | 0 | 2023-12-18 09:53:41+00:00 | 12k |
sinoyou/nelf-pro | nerfstudio/cameras/cameras.py | [
{
"identifier": "camera_utils",
"path": "nerfstudio/cameras/camera_utils.py",
"snippet": "_EPS = np.finfo(float).eps * 4.0\n M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]\n K = np.array(\n [\n [m00 - m11 - m22, 0.0, 0.0, 0.0],\n [m01 + m10, m11 - m00 - m22, 0.0, 0.0],\n [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],\n [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],\n ]\n )\ndef unit_vector(data, axis: Optional[int] = None) -> np.ndarray:\ndef quaternion_from_matrix(matrix, isprecise: bool = False) -> np.ndarray:\ndef quaternion_slerp(quat0, quat1, fraction: float, spin: int = 0, shortestpath: bool = True) -> np.ndarray:\ndef quaternion_matrix(quaternion) -> np.ndarray:\ndef get_interpolated_poses(pose_a, pose_b, steps: int = 10) -> List[float]:\ndef get_interpolated_k(k_a, k_b, steps: int = 10) -> TensorType[3, 4]:\ndef get_interpolated_poses_many(\n poses: TensorType[\"num_poses\", 3, 4],\n Ks: TensorType[\"num_poses\", 3, 3],\n steps_per_transition=10,\n) -> Tuple[TensorType[\"num_poses\", 3, 4], TensorType[\"num_poses\", 3, 3]]:\ndef normalize(x) -> TensorType[...]:\ndef viewmatrix(lookat, up, pos) -> TensorType[...]:\ndef get_distortion_params(\n k1: float = 0.0,\n k2: float = 0.0,\n k3: float = 0.0,\n k4: float = 0.0,\n p1: float = 0.0,\n p2: float = 0.0,\n) -> TensorType[...]:\ndef _compute_residual_and_jacobian(\n x: torch.Tensor,\n y: torch.Tensor,\n xd: torch.Tensor,\n yd: torch.Tensor,\n distortion_params: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]:\ndef radial_and_tangential_undistort(\n coords: torch.Tensor,\n distortion_params: torch.Tensor,\n eps: float = 1e-3,\n max_iterations: int = 10,\n) -> torch.Tensor:\ndef rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3, 3]:\ndef auto_orient_and_center_poses(\n poses: TensorType[\"num_poses\":..., 4, 4], method: Literal[\"pca\", \"up\", \"none\"] = \"up\", center_poses: bool = True\n) -> TensorType[\"num_poses\":..., 3, 4]:"
},
{
"identifier": "Probes",
"path": "nerfstudio/cameras/probes.py",
"snippet": "class Probes:\n camera_to_worlds: TensorType[\"num_cams\", 3, 4]\n probe_config: dict\n\n num_basis: int = field(init=False)\n num_core: int = field(init=False)\n pos_basis: TensorType[\"num_basis\", 3] = field(init=False)\n pos_core: TensorType[\"num_core\", 3] = field(init=False)\n sorted_basis_index: TensorType[\"num_cams\", \"num_basis\"] = field(init=False)\n sorted_core_index: TensorType[\"num_cams\", \"num_core\"] = field(init=False)\n\n def __post_init__(self):\n \"\"\"\n For each scene camera, generate the index of the ascending <camera, factor> distance. \n \"\"\"\n # extract camera position\n if len(self.camera_to_worlds.shape) == 2:\n self.camera_to_worlds = self.camera_to_worlds.unsqueeze(dim=0)\n camera_pos = self.camera_to_worlds[:, :3, 3].view(self.camera_to_worlds.shape[0], 1, 3) # num_cam, 1, 3\n \n self.__init_basis_factor__(camera_pos)\n self.__init_core_factor__(camera_pos)\n \n def __init_basis_factor__(self, camera_pos):\n # basis camera number\n self.basis_factor_list = self.probe_config[0]['basis']\n self.num_basis = len(self.basis_factor_list)\n\n # prepare basis factor position \n self.pos_basis = [torch.tensor([factor['x'], factor['y'], factor['z']]) for factor in self.basis_factor_list]\n self.pos_basis = torch.stack(self.pos_basis, dim=0)\n self.pos_basis = self.pos_basis.view(self.num_basis, 3).to(self.device) # num_basis, 3\n\n camera_basis_dist = torch.norm(camera_pos - self.pos_basis.unsqueeze(dim=0), dim=-1) # num_cam, num_basis\n _, self.sorted_basis_index = torch.sort(camera_basis_dist, dim=1)\n \n def __init_core_factor__(self, camera_pos):\n self.core_factor_list = self.probe_config[0]['core']\n self.num_core = len(self.core_factor_list)\n\n self.pos_core = [torch.tensor([factor['x'], factor['y'], factor['z']]) for factor in self.core_factor_list]\n self.pos_core = torch.stack(self.pos_core, dim=0)\n self.pos_core = self.pos_core.view(self.num_core, 3).to(self.device) # num_core, 3\n \n camera_core_dist = torch.norm(camera_pos - self.pos_core.unsqueeze(dim=0), dim=-1) # num_cam, num_core\n _, self.sorted_core_index = torch.sort(camera_core_dist, dim=1)\n\n def get_plotly(self):\n plotly_data = []\n\n # basis\n basis_plot_data = {'center': self.pos_basis.cpu().numpy()} \n plotly_data += plot_spheres(basis_plot_data, name='basis factor')\n\n # core\n core_plot_data = {'center': self.pos_core.cpu().numpy()}\n plotly_data += plot_spheres(core_plot_data, name='core factor', scatter_size=9, color='rgba(255, 192, 203, 1)')\n \n return plotly_data\n\n @property\n def device(self):\n \"\"\"Returns the device that the data is on.\"\"\"\n return self.camera_to_worlds.device\n \n def get_nearby_basis_index_and_pos(self, camera_indices: TensorType[\"bs\"], near_num: int) -> TensorType[\"bs\", \"near_num\", 3]:\n \"\"\"Get the indices and the positions of each ray's nearby basis factor. \"\"\"\n camera_indices = camera_indices.squeeze(-1)\n assert len(camera_indices.shape) == 1, 'squeezed camera_indices should be TensorType[\"bs\", ], but got {}'.format(camera_indices.shape)\n assert self.num_basis >= near_num, 'near_num should be smaller than total basis factor number. 
'\n\n return self._get_nearby_factor_index_and_pos(camera_indices, near_num, self.sorted_basis_index, self.pos_basis)\n\n def get_nearby_core_index_and_pos(self, camera_indices: TensorType[\"bs\"], near_num: int) -> TensorType[\"bs\", \"near_num\", 3]:\n \"\"\"Get the indices and the positions of each ray's nearby core factor.\"\"\"\n camera_indices = camera_indices.squeeze(-1)\n assert len(camera_indices.shape) == 1, 'squeezed camera_indices should be TensorType[\"bs\", ], but got {}'.format(camera_indices.shape)\n assert self.num_core >= near_num, 'near_num should be smaller than total core factor number. '\n\n return self._get_nearby_factor_index_and_pos(camera_indices, near_num, self.sorted_core_index, self.pos_core)\n \n def _get_nearby_factor_index(self, camera_indices: TensorType[\"bs\"], near_num: int, sorted_factor_index: TensorType[\"num_cam, num_factor\"]) -> TensorType[\"bs\", \"near_num\"]:\n \"\"\"Get nearby factor index for each ray. \"\"\"\n sorted_core_index_prefix = sorted_factor_index[:, :near_num]\n return sorted_core_index_prefix[camera_indices, :]\n\n def _get_nearby_factor_index_and_pos(self, camera_indices: TensorType[\"bs\"], near_num: int, sorted_factor_index: TensorType[\"num_cam, num_factor\"], factor_pos: TensorType[\"num_factor\", 3]) -> TensorType[\"bs\", \"near_num\", 3]: \n bs = camera_indices.shape[0]\n selected_index = self._get_nearby_factor_index(camera_indices, near_num, sorted_factor_index)\n selected_pos = factor_pos[selected_index.view(-1), :].view(bs, near_num, 3)\n return selected_index, selected_pos\n \n def get_num_basis(self):\n return self.num_basis\n\n def get_num_core(self):\n return self.num_core"
},
{
"identifier": "RayBundle",
"path": "nerfstudio/cameras/rays.py",
"snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. \"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indicies.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples"
},
{
"identifier": "TensorDataclass",
"path": "nerfstudio/utils/tensor_dataclass.py",
"snippet": "class TensorDataclass:\n \"\"\"@dataclass of tensors with the same size batch. Allows indexing and standard tensor ops.\n Fields that are not Tensors will not be batched unless they are also a TensorDataclass.\n Any fields that are dictionaries will have their Tensors or TensorDataclasses batched, and\n dictionaries will have their tensors or TensorDataclasses considered in the initial broadcast.\n Tensor fields must have at least 1 dimension, meaning that you must convert a field like torch.Tensor(1)\n to torch.Tensor([1])\n\n Example:\n\n .. code-block:: python\n\n @dataclass\n class TestTensorDataclass(TensorDataclass):\n a: torch.Tensor\n b: torch.Tensor\n c: torch.Tensor = None\n\n # Create a new tensor dataclass with batch size of [2,3,4]\n test = TestTensorDataclass(a=torch.ones((2, 3, 4, 2)), b=torch.ones((4, 3)))\n\n test.shape # [2, 3, 4]\n test.a.shape # [2, 3, 4, 2]\n test.b.shape # [2, 3, 4, 3]\n\n test.reshape((6,4)).shape # [6, 4]\n test.flatten().shape # [24,]\n\n test[..., 0].shape # [2, 3]\n test[:, 0, :].shape # [2, 4]\n \"\"\"\n\n _shape: tuple\n\n # A mapping from field-name (str): n (int)\n # Any field OR any key in a dictionary field with this name (field-name) and a corresponding\n # torch.Tensor will be assumed to have n dimensions after the batch dims. These n final dimensions\n # will remain the same shape when doing reshapes, broadcasting, etc on the tensordataclass\n _field_custom_dimensions: Dict[str, int] = {}\n\n def __post_init__(self) -> None:\n \"\"\"Finishes setting up the TensorDataclass\n\n This will 1) find the broadcasted shape and 2) broadcast all fields to this shape 3)\n set _shape to be the broadcasted shape.\n \"\"\"\n if self._field_custom_dimensions is not None:\n for k, v in self._field_custom_dimensions.items():\n assert (\n isinstance(v, int) and v > 1\n ), f\"Custom dimensions must be an integer greater than 1, since 1 is the default, received {k}: {v}\"\n\n if not dataclasses.is_dataclass(self):\n raise TypeError(\"TensorDataclass must be a dataclass\")\n\n batch_shapes = self._get_dict_batch_shapes(\n {f.name: self.__getattribute__(f.name) for f in dataclasses.fields(self)}\n )\n if len(batch_shapes) == 0:\n raise ValueError(\"TensorDataclass must have at least one tensor\")\n batch_shape = torch.broadcast_shapes(*batch_shapes)\n\n broadcasted_fields = self._broadcast_dict_fields(\n {f.name: self.__getattribute__(f.name) for f in dataclasses.fields(self)}, batch_shape\n )\n for f, v in broadcasted_fields.items():\n self.__setattr__(f, v)\n\n self.__setattr__(\"_shape\", batch_shape)\n\n def _get_dict_batch_shapes(self, dict_: Dict) -> List:\n \"\"\"Returns batch shapes of all tensors in a dictionary\n\n Args:\n dict_: The dictionary to get the batch shapes of.\n\n Returns:\n The batch shapes of all tensors in the dictionary.\n \"\"\"\n batch_shapes = []\n for k, v in dict_.items():\n if isinstance(v, torch.Tensor):\n if isinstance(self._field_custom_dimensions, dict) and k in self._field_custom_dimensions:\n # pylint: disable=unsubscriptable-object\n batch_shapes.append(v.shape[: -self._field_custom_dimensions[k]])\n else:\n batch_shapes.append(v.shape[:-1])\n elif isinstance(v, TensorDataclass):\n batch_shapes.append(v.shape)\n elif isinstance(v, Dict):\n batch_shapes.extend(self._get_dict_batch_shapes(v))\n return batch_shapes\n\n def _broadcast_dict_fields(self, dict_: Dict, batch_shape) -> Dict:\n \"\"\"Broadcasts all tensors in a dictionary according to batch_shape\n\n Args:\n dict_: The dictionary to broadcast.\n\n 
Returns:\n The broadcasted dictionary.\n \"\"\"\n new_dict = {}\n for k, v in dict_.items():\n if isinstance(v, torch.Tensor):\n # If custom dimension key, then we need to\n if isinstance(self._field_custom_dimensions, dict) and k in self._field_custom_dimensions:\n # pylint: disable=unsubscriptable-object\n new_dict[k] = v.broadcast_to(\n (\n *batch_shape,\n *v.shape[-self._field_custom_dimensions[k] :],\n )\n )\n else:\n new_dict[k] = v.broadcast_to((*batch_shape, v.shape[-1]))\n elif isinstance(v, TensorDataclass):\n new_dict[k] = v.broadcast_to(batch_shape)\n elif isinstance(v, Dict):\n new_dict[k] = self._broadcast_dict_fields(v, batch_shape)\n return new_dict\n\n def __getitem__(self: TensorDataclassT, indices) -> TensorDataclassT:\n if isinstance(indices, (torch.Tensor)):\n return self._apply_fn_to_fields(lambda x: x[indices])\n if isinstance(indices, (int, slice, type(Ellipsis))):\n indices = (indices,)\n assert isinstance(indices, tuple)\n tensor_fn = lambda x: x[indices + (slice(None),)]\n dataclass_fn = lambda x: x[indices]\n\n def custom_tensor_dims_fn(k, v):\n custom_dims = self._field_custom_dimensions[k] # pylint: disable=unsubscriptable-object\n return v[indices + ((slice(None),) * custom_dims)]\n\n return self._apply_fn_to_fields(tensor_fn, dataclass_fn, custom_tensor_dims_fn=custom_tensor_dims_fn)\n\n def __setitem__(self, indices, value) -> NoReturn:\n raise RuntimeError(\"Index assignment is not supported for TensorDataclass\")\n\n def __len__(self) -> int:\n if len(self._shape) == 0:\n raise TypeError(\"len() of a 0-d tensor\")\n return self.shape[0]\n\n def __bool__(self) -> bool:\n if len(self) == 0:\n raise ValueError(\n f\"The truth value of {self.__class__.__name__} when `len(x) == 0` \"\n \"is ambiguous. Use `len(x)` or `x is not None`.\"\n )\n return True\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"Returns the batch shape of the tensor dataclass.\"\"\"\n return self._shape\n\n @property\n def size(self) -> int:\n \"\"\"Returns the number of elements in the tensor dataclass batch dimension.\"\"\"\n if len(self._shape) == 0:\n return 1\n return int(np.prod(self._shape))\n\n @property\n def ndim(self) -> int:\n \"\"\"Returns the number of dimensions of the tensor dataclass.\"\"\"\n return len(self._shape)\n\n def reshape(self: TensorDataclassT, shape: Tuple[int, ...]) -> TensorDataclassT:\n \"\"\"Returns a new TensorDataclass with the same data but with a new shape.\n\n This should deepcopy as well.\n\n Args:\n shape: The new shape of the tensor dataclass.\n\n Returns:\n A new TensorDataclass with the same data but with a new shape.\n \"\"\"\n if isinstance(shape, int):\n shape = (shape,)\n tensor_fn = lambda x: x.reshape((*shape, x.shape[-1]))\n dataclass_fn = lambda x: x.reshape(shape)\n\n def custom_tensor_dims_fn(k, v):\n custom_dims = self._field_custom_dimensions[k] # pylint: disable=unsubscriptable-object\n return v.reshape((*shape, *v.shape[-custom_dims:]))\n\n return self._apply_fn_to_fields(tensor_fn, dataclass_fn, custom_tensor_dims_fn=custom_tensor_dims_fn)\n\n def flatten(self: TensorDataclassT) -> TensorDataclassT:\n \"\"\"Returns a new TensorDataclass with flattened batch dimensions\n\n Returns:\n TensorDataclass: A new TensorDataclass with the same data but with a new shape.\n \"\"\"\n return self.reshape((-1,))\n\n def broadcast_to(self: TensorDataclassT, shape: Union[torch.Size, Tuple[int, ...]]) -> TensorDataclassT:\n \"\"\"Returns a new TensorDataclass broadcast to new shape.\n\n Changes to the original tensor dataclass should 
effect the returned tensor dataclass,\n meaning it is NOT a deepcopy, and they are still linked.\n\n Args:\n shape: The new shape of the tensor dataclass.\n\n Returns:\n A new TensorDataclass with the same data but with a new shape.\n \"\"\"\n\n def custom_tensor_dims_fn(k, v):\n custom_dims = self._field_custom_dimensions[k] # pylint: disable=unsubscriptable-object\n return v.broadcast_to((*shape, *v.shape[-custom_dims:]))\n\n return self._apply_fn_to_fields(\n lambda x: x.broadcast_to((*shape, x.shape[-1])), custom_tensor_dims_fn=custom_tensor_dims_fn\n )\n\n def to(self: TensorDataclassT, device) -> TensorDataclassT:\n \"\"\"Returns a new TensorDataclass with the same data but on the specified device.\n\n Args:\n device: The device to place the tensor dataclass.\n\n Returns:\n A new TensorDataclass with the same data but on the specified device.\n \"\"\"\n return self._apply_fn_to_fields(lambda x: x.to(device))\n\n def _apply_fn_to_fields(\n self: TensorDataclassT,\n fn: Callable,\n dataclass_fn: Optional[Callable] = None,\n custom_tensor_dims_fn: Optional[Callable] = None,\n ) -> TensorDataclassT:\n \"\"\"Applies a function to all fields of the tensor dataclass.\n\n TODO: Someone needs to make a high level design choice for whether not not we want this\n to apply the function to any fields in arbitray superclasses. This is an edge case until we\n upgrade to python 3.10 and dataclasses can actually be subclassed with vanilla python and no\n janking, but if people try to jank some subclasses that are grandchildren of TensorDataclass\n (imagine if someone tries to subclass the RayBundle) this will matter even before upgrading\n to 3.10 . Currently we aren't going to be able to work properly for grandchildren, but you\n want to use self.__dict__ if you want to apply this to grandchildren instead of our dictionary\n from dataclasses.fields(self) as we do below and in other places.\n\n Args:\n fn: The function to apply to tensor fields.\n dataclass_fn: The function to apply to TensorDataclass fields.\n\n Returns:\n A new TensorDataclass with the same data but with a new shape.\n \"\"\"\n old_fields = {f.name: self.__getattribute__(f.name) for f in dataclasses.fields(self)}\n new_fields = self._apply_fn_to_dict(old_fields, fn, dataclass_fn, custom_tensor_dims_fn)\n return dataclasses.replace(self, **new_fields)\n\n def _apply_fn_to_dict(\n self,\n dict_: Dict,\n fn: Callable,\n dataclass_fn: Optional[Callable] = None,\n custom_tensor_dims_fn: Optional[Callable] = None,\n ) -> Dict:\n \"\"\"A helper function for _apply_fn_to_fields, applying a function to all fields of dict_\n\n Args:\n dict_: The dictionary to apply the function to.\n fn: The function to apply to tensor fields.\n dataclass_fn: The function to apply to TensorDataclass fields.\n\n Returns:\n A new dictionary with the same data but with a new shape. 
Will deep copy\"\"\"\n field_names = dict_.keys()\n new_dict = {}\n for f in field_names:\n v = dict_[f]\n if v is not None:\n if isinstance(v, TensorDataclass) and dataclass_fn is not None:\n new_dict[f] = dataclass_fn(v)\n # This is the case when we have a custom dimensions tensor\n elif (\n isinstance(v, torch.Tensor)\n and isinstance(self._field_custom_dimensions, dict)\n and f in self._field_custom_dimensions\n and custom_tensor_dims_fn is not None\n ):\n new_dict[f] = custom_tensor_dims_fn(f, v)\n elif isinstance(v, (torch.Tensor, TensorDataclass)):\n new_dict[f] = fn(v)\n elif isinstance(v, Dict):\n new_dict[f] = self._apply_fn_to_dict(v, fn, dataclass_fn)\n else:\n new_dict[f] = deepcopy(v)\n else:\n pass\n # comment out this warning for now, since it's too verbose\n # print(f'[warning]: {f} is treated as None type when copying the tensorclass. ') \n\n return new_dict"
},
{
"identifier": "plot_camera_components",
"path": "nerfstudio/utils/plotly_utils_nelfpro.py",
"snippet": "def plot_spheres(coordinates, name='default', scatter_size=7, color=None):\ndef plot_point3d(xyz, color):\ndef plot_a_segment(coordinates, camera_group, idx, direction, color, hovertext): \ndef plot_camera_axis(camera_group, coordinates, image_list):\n def add_a_segment(idx, direction, color, hovertext): \ndef plot_camera_pyramid(camera_group, coordinates, image_list, special_camera):\n def get_color(camera_group, special=False):\n def add_a_pyramid(idx, color, hovertext, surface_opacity=0.05):\ndef plot_camera_components(coordinates, image_list, white_list=None, special_camera=None, camera_group=None):"
}
] | import base64
import math
import cv2
import torch
import torchvision
import numpy as np
import nerfstudio.utils.poses as pose_utils
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import Dict, List, Optional, Tuple, Union
from rich.console import Console
from torch.nn.functional import normalize
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.probes import Probes
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.utils.tensor_dataclass import TensorDataclass
from nerfstudio.utils.plotly_utils_nelfpro import plot_camera_components, plotly_camera_scale | 9,979 | camera_type
), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
camera_type = camera_type.to(self.device)
if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
camera_type = camera_type.unsqueeze(-1)
# assert torch.all(
# camera_type.view(-1)[0] == camera_type
# ), "Batched cameras of different camera_types will be allowed in the future."
else:
raise ValueError(
'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
+ str(type(camera_type))
)
return camera_type
def _init_get_height_width(
self,
h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument for height or width
Height/Width Calculation:
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
If none, use cx or cy * 2
Else raise error
Args:
h_w: height or width argument from __init__()
c_x_y: cx or cy for when h_w == None
"""
if isinstance(h_w, int):
h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
elif isinstance(h_w, torch.Tensor):
assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
h_w = h_w.to(torch.int64).to(self.device)
if h_w.ndim == 0 or h_w.shape[-1] != 1:
h_w = h_w.unsqueeze(-1)
# assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
elif h_w is None:
h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
else:
raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
@property
def device(self):
"""Returns the device that the camera is on."""
return self.camera_to_worlds.device
@property
def image_height(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.height
@property
def image_width(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.width
@property
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the heights and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
"""This gets the image coordinates of one of the cameras in this object.
If no index is specified, it will return the maximum possible sized height / width image coordinate map,
by looking at the maximum height and width of all the cameras in this object.
Args:
pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
flattened camera
Returns:
Grid of image coordinates.
"""
if index is None:
image_height = torch.max(self.image_height.view(-1))
image_width = torch.max(self.image_width.view(-1))
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
else:
image_height = self.image_height[index].item()
image_width = self.image_width[index].item()
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
return image_coords
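# Worked example (illustrative): for a camera with height = width = 2 and the default
# pixel_offset = 0.5, the returned grid of (y, x) pixel-center coordinates is
#     [[[0.5, 0.5], [0.5, 1.5]],
#      [[1.5, 0.5], [1.5, 1.5]]]   # shape (2, 2, 2)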
def generate_rays( # pylint: disable=too-many-statements
self,
camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
coords: Optional[TensorType["num_rays":..., 2]] = None,
camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
keep_shape: Optional[bool] = None,
disable_distortion: bool = False,
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Camera Models
"""
CONSOLE = Console(width=120)
class CameraType(Enum):
"""Supported camera types."""
PERSPECTIVE = auto()
FISHEYE = auto()
EQUIRECTANGULAR = auto()
CAMERA_MODEL_TO_TYPE = {
"SIMPLE_PINHOLE": CameraType.PERSPECTIVE,
"PINHOLE": CameraType.PERSPECTIVE,
"SIMPLE_RADIAL": CameraType.PERSPECTIVE,
"RADIAL": CameraType.PERSPECTIVE,
"OPENCV": CameraType.PERSPECTIVE,
"OPENCV_FISHEYE": CameraType.FISHEYE,
"EQUIRECTANGULAR": CameraType.EQUIRECTANGULAR,
}
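# Minimal usage sketch (illustrative only; `model_name` is a hypothetical variable):
# the mapping above converts a COLMAP-style camera model string into a CameraType value.
#     model_name = "OPENCV_FISHEYE"                    # e.g. read from dataset metadata
#     camera_type = CAMERA_MODEL_TO_TYPE[model_name]   # -> CameraType.FISHEYE
#     camera_type.value                                # integer stored in the Cameras tensor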
@dataclass(init=False)
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
probe_config: dict config containing the generated probe information (core and basis)
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras":..., 1]]
image_filenames: Optional[List[str]]
probe_config: Optional[list]
def __init__(
self,
camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
fx: Union[TensorType["batch_fxs":..., 1], float],
fy: Union[TensorType["batch_fys":..., 1], float],
cx: Union[TensorType["batch_cxs":..., 1], float],
cy: Union[TensorType["batch_cys":..., 1], float],
width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
camera_type: Optional[
Union[
TensorType["batch_cam_types":..., 1],
int,
List[CameraType],
CameraType,
]
] = CameraType.PERSPECTIVE,
times: Optional[TensorType["num_cameras"]] = None,
image_filenames: Optional[List[str]] = None,
probe_config: Optional[list] = None
):
"""Initializes the Cameras object.
Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
(in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
TensorType[1] (in the case of the rest of the elements). The dimensions before that are
considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
all the tensors to be the same batch dimension. This means you can use any combination of the
input types in the function signature and it won't break. Your batch size for all tensors
must be broadcastable to the same size, and the resulting number of batch dimensions will be
the batch dimension with the largest number of dimensions.
"""
# This will notify the tensordataclass that we have a field with more than 1 dimension
self._field_custom_dimensions = {"camera_to_worlds": 2}
self.camera_to_worlds = camera_to_worlds
# fx fy calculation
self.fx = self._init_get_fc_xy(fx, "fx") # @dataclass's post_init will take care of broadcasting
self.fy = self._init_get_fc_xy(fy, "fy") # @dataclass's post_init will take care of broadcasting
# cx cy calculation
self.cx = self._init_get_fc_xy(cx, "cx") # @dataclass's post_init will take care of broadcasting
self.cy = self._init_get_fc_xy(cy, "cy") # @dataclass's post_init will take care of broadcasting
# Distortion Params Calculation:
self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting
# @dataclass's post_init will take care of broadcasting
self.height = self._init_get_height_width(height, self.cy)
self.width = self._init_get_height_width(width, self.cx)
self.camera_type = self._init_get_camera_type(camera_type)
self.times = self._init_get_times(times)
self.image_filenames = image_filenames
self.probe_config = probe_config
if self.probe_config is not None:
self.probe = Probes(self.camera_to_worlds, self.probe_config)
else:
self.probe = None
self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors
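# Illustrative sketch of the broadcasting described in the __init__ docstring
# (hypothetical values; shapes follow from TensorDataclass.__post_init__):
#     c2w = torch.eye(4)[:3].expand(10, 3, 4)   # batch of 10 camera-to-world matrices
#     cams = Cameras(camera_to_worlds=c2w, fx=100.0, fy=100.0, cx=64.0, cy=64.0)
#     cams.shape      # (10,)  -- scalar fx/fy/cx/cy are broadcast to shape [10, 1]
#     cams.fx.shape   # torch.Size([10, 1])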
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principal point x or y and returns a tensor of the correct shape
Only needs to make sure that we have a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principal point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
def _init_get_camera_type(
self,
camera_type: Union[
TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument camera_type
Camera Type Calculation:
If CameraType, convert to int and then to tensor, then broadcast to all cameras
If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
Args:
camera_type: camera_type argument from __init__()
"""
if isinstance(camera_type, CameraType):
camera_type = torch.tensor([camera_type.value], device=self.device)
elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
elif isinstance(camera_type, int):
camera_type = torch.tensor([camera_type], device=self.device)
elif isinstance(camera_type, torch.Tensor):
assert not torch.is_floating_point(
camera_type
), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
camera_type = camera_type.to(self.device)
if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
camera_type = camera_type.unsqueeze(-1)
# assert torch.all(
# camera_type.view(-1)[0] == camera_type
# ), "Batched cameras of different camera_types will be allowed in the future."
else:
raise ValueError(
'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
+ str(type(camera_type))
)
return camera_type
def _init_get_height_width(
self,
h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument for height or width
Height/Width Calculation:
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
If none, use cx or cy * 2
Else raise error
Args:
h_w: height or width argument from __init__()
c_x_y: cx or cy for when h_w == None
"""
if isinstance(h_w, int):
h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
elif isinstance(h_w, torch.Tensor):
assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
h_w = h_w.to(torch.int64).to(self.device)
if h_w.ndim == 0 or h_w.shape[-1] != 1:
h_w = h_w.unsqueeze(-1)
# assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
elif h_w is None:
h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
else:
raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
@property
def device(self):
"""Returns the device that the camera is on."""
return self.camera_to_worlds.device
@property
def image_height(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.height
@property
def image_width(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.width
@property
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the heights and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
"""This gets the image coordinates of one of the cameras in this object.
If no index is specified, it will return the maximum possible sized height / width image coordinate map,
by looking at the maximum height and width of all the cameras in this object.
Args:
pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
flattened camera
Returns:
Grid of image coordinates.
"""
if index is None:
image_height = torch.max(self.image_height.view(-1))
image_width = torch.max(self.image_width.view(-1))
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
else:
image_height = self.image_height[index].item()
image_width = self.image_width[index].item()
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
return image_coords
def generate_rays( # pylint: disable=too-many-statements
self,
camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
coords: Optional[TensorType["num_rays":..., 2]] = None,
camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
keep_shape: Optional[bool] = None,
disable_distortion: bool = False, | ) -> RayBundle: | 2 | 2023-12-15 20:07:22+00:00 | 12k |
Infleqtion/qLDPC | qldpc/codes.py | [
{
"identifier": "abstract",
"path": "qldpc/abstract.py",
"snippet": "DEFAULT_FIELD_ORDER = 2\nclass GroupMember(comb.Permutation):\nclass Group:\nclass Element:\nclass Protograph:\nclass TrivialGroup(Group):\nclass CyclicGroup(Group):\nclass DihedralGroup(Group):\nclass QuaternionGroup(Group):\n def __mul__(self, other: UnknownType) -> UnknownType:\n def __add__(self, other: UnknownType) -> UnknownType:\n def __lt__(self, other: GroupMember) -> bool:\n def __matmul__(self, other: GroupMember) -> GroupMember:\ndef default_lift(member: GroupMember) -> IntegerArray:\n def __init__(\n self, group: PermutationGroup, field: int | None = None, lift: Lift | None = None\n ) -> None:\n def __eq__(self, other: object) -> bool:\n def __mul__(self, other: Group) -> Group:\n def lift(member: GroupMember) -> galois.FieldArray:\n def __contains__(self, member: GroupMember) -> bool:\n def field(self) -> type[galois.FieldArray]:\n def order(self) -> int:\n def generators(self) -> Sequence[GroupMember]:\n def generate(self) -> Iterator[GroupMember]:\n def identity(self) -> GroupMember:\n def product(cls, *groups: Group, repeat: int = 1) -> Group:\n def lift(self, member: GroupMember) -> galois.FieldArray:\n def lift_dim(self) -> int:\n def table(self) -> IntegerArray:\n def from_table(\n cls,\n table: IntegerArray | Sequence[Sequence[int]],\n field: int | None = None,\n integer_lift: IntegerLift | None = None,\n ) -> Group:\n def lift(member: GroupMember) -> IntegerArray:\n def from_generators(\n cls, *generators: GroupMember, field: int | None = None, lift: Lift | None = None\n ) -> Group:\n def __init__(self, group: Group, *members: GroupMember):\n def __eq__(self, other: object) -> bool:\n def __iter__(self) -> Iterator[tuple[GroupMember, galois.FieldArray]]:\n def __add__(self, other: GroupMember | Element) -> Element:\n def __radd__(self, other: GroupMember) -> Element:\n def __mul__(self, other: int | GroupMember | Element) -> Element:\n def __rmul__(self, other: int | GroupMember) -> Element:\n def __neg__(self) -> Element:\n def __pow__(self, power: int) -> Element:\n def copy(self) -> Element:\n def field(self) -> type[galois.FieldArray]:\n def group(self) -> Group:\n def lift(self) -> galois.FieldArray:\n def zero(self) -> Element:\n def one(self) -> Element:\n def T(self) -> Element:\n def __init__(self, matrix: Protograph | ObjectMatrix) -> None:\n def __eq__(self, other: object) -> bool:\n def __rmul__(self, val: int) -> Protograph:\n def __mul__(self, val: int) -> Protograph:\n def matrix(self) -> npt.NDArray[np.object_]:\n def shape(self) -> tuple[int, ...]:\n def group(self) -> Group:\n def field(self) -> type[galois.FieldArray]:\n def lift(self) -> galois.FieldArray:\n def T(self) -> Protograph:\n def build(cls, group: Group, matrix: ObjectMatrix, *, field: int = 2) -> Protograph:\n def __init__(self, field: int | None = None) -> None:\n def to_protograph(\n cls, matrix: IntegerArray | Sequence[Sequence[int]], field: int | None = None\n ) -> Protograph:\n def __init__(self, order: int) -> None:\n def __init__(self, order: int) -> None:\n def __init__(self) -> None:\n def lift(member: int) -> IntegerArray:"
},
{
"identifier": "CayleyComplex",
"path": "qldpc/objects.py",
"snippet": "class CayleyComplex:\n \"\"\"Left-right Cayley complex, used for constructing quantum Tanner codes.\n\n A Cayley complex is a geometric structure built out of a two subsets A and B of a group G. The\n subsets respectively act on elements of G from the left and right, and must be symmetric, which\n is to say (for example) that a ∈ A iff a^-1 ∈ A. To avoid constructing a complex that factors\n into disconnected pieces, we can define G as the group generated by all elements of A and B.\n\n The generating data (A,B) is used to build vertices V, edges E, and faces F as follows:\n - vertices are members of G,\n - edges have the form (g, ag) and (g, gb), and\n - faces f(g,a,b) have the form {g, ab, gb, agb}:\n\n g → gb\n ↓ ↓\n ag → agb\n\n The complex (V,E,F) is in turn used to construct two bipartite directed graphs:\n - subgraph_0 with edges ( g, f(g,a,b)), and\n - subgraph_1 with edges (ag, f(g,a,b)).\n These graphs are used to construct classical Tanner codes that serve as the X and Z sectors of a\n quantum CSS code (namely, a quantum Tanner code).\n\n There are, however, two complications to keep in mind. First, in order for the faces to be non\n degenerate (that is, for each face to contain four vertices), the generating data (A,B) must\n satisfy the Total No Conjugacy condition:\n\n [1] ag != gb for all g,a,b in (G,A,B).\n\n Second, in order to construct a valid quantum Tanner code out of subgraph_0 and subgraph_1, the\n graph (V,E) must be bipartite, V = V_0 ∪ V_1, such that (for example) nodes {g,agb} are in one\n partition, while nodes {ag,gb} are in the other partition. The nodes V_i are then used as the\n sources of subgraph_i. The graph (V,E) is bipartite if:\n\n [2] The Cayley graphs (G;A) and (G;B) both are bipartite.\n\n The Cayley graphs (G;A) and (G;B) are graphs whose\n - vertices are members of G, and\n - edges are pairs of vertices connected by A or B, as in (g, ag) or (g, gb).\n\n If both [1] and [2] are satisfied, when we can construct a Cayley complex out of (G,A,B)\n directly, which we call a \"rank-0\" complex.\n\n If [1] is satisfied but [2] is not, then we can construct a \"rank-1\" complex that enforces\n requirement [2] by taking the double cover of G and modifying members of A and B as:\n - G --> G ⊗ {0,1},\n - a --> (a,1), and\n - b --> (b,1),\n where (a,1) acts on (g,i) as (a,1) * (g,i) = (ag,i+1), and similarly (b,1) * (g,i) = (gb,i+1).\n\n If requirement [1] is not satisfied, then we can construct a \"rank-2\" complex that enforces both\n [1] and [2] by taking the quadruple cover of G and modifying members of A and B as:\n - G --> G ⊗ {0,1} ⊗ {0,1},\n - a --> (a,1,0), and\n - b --> (b,0,1),\n where similarly to before (a,1,0) * (g,i,j) = (ag,i+1,j) and (b,0,1) * (g,i,j) = (gb,i,j+1).\n\n References:\n - https://arxiv.org/abs/2202.13641\n - https://arxiv.org/abs/2206.07571\n - https://www.youtube.com/watch?v=orWcstqWGGo\n \"\"\"\n\n # generating data\n subset_a: set[abstract.GroupMember]\n subset_b: set[abstract.GroupMember]\n group: abstract.Group\n\n # rank and graph (vertices and edges)\n rank: int\n graph: nx.Graph\n faces: set[frozenset[abstract.GroupMember]]\n\n # subgraphs used for a quantum Tanner code\n subgraph_0: nx.DiGraph\n subgraph_1: nx.DiGraph\n\n def __init__(\n self,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember] | None = None,\n *,\n rank: int | None = None,\n ) -> None:\n \"\"\"Construct a left-right Cayley complex.\"\"\"\n assert not rank or 0 <= rank <= 2\n if subset_b is None:\n 
subset_b = subset_a\n subset_a = set(subset_a)\n subset_b = set(subset_b)\n assert all(~member in subset_a for member in subset_a)\n assert all(~member in subset_b for member in subset_b)\n\n # identify the group generated by the provided (sub)sets\n group = abstract.Group.from_generators(*subset_a, *subset_b)\n\n # determine the rank of this complex\n min_rank = CayleyComplex.get_min_rank(group, subset_a, subset_b)\n if rank is not None and rank < min_rank:\n error = f\"Cannot set CayleyComplex rank to {rank} (min_rank: {min_rank})\"\n raise ValueError(error)\n self.rank = min_rank if rank is None else rank\n\n # take the double cover(s) of the group, if necessary, and save the generating data\n identity, shift = abstract.CyclicGroup(2).generate()\n if self.rank == 2:\n shift_a = shift @ identity\n shift_b = identity @ shift\n elif self.rank == 1:\n shift_a = shift_b = shift\n else: # self.rank == 0\n shift_a = shift_b = abstract.TrivialGroup().identity\n self.subset_a = set(aa @ shift_a for aa in subset_a)\n self.subset_b = set(bb @ shift_b for bb in subset_b)\n self.group = abstract.Group.from_generators(*self.subset_a, *self.subset_b)\n\n # construct the vertices, edges, and faces of this complex\n self.graph = nx.Graph()\n self.faces = set()\n for gg, aa, bb in itertools.product(self.group.generate(), self.subset_a, self.subset_b):\n aa_gg, gg_bb, aa_gg_bb = aa * gg, gg * bb, aa * gg * bb\n face = frozenset([gg, aa_gg, gg_bb, aa_gg_bb])\n self.faces.add(face)\n self.graph.add_edge(gg, aa_gg)\n self.graph.add_edge(gg, gg_bb)\n self.graph.add_edge(aa_gg, aa_gg_bb)\n self.graph.add_edge(gg_bb, aa_gg_bb)\n\n # construct the subgraphs of the complex\n self.subgraph_0 = nx.DiGraph()\n self.subgraph_1 = nx.DiGraph()\n half_group, _ = nx.bipartite.sets(self.graph)\n for gg, aa, bb in itertools.product(half_group, self.subset_a, self.subset_b):\n aa_gg, gg_bb, aa_gg_bb = aa * gg, gg * bb, aa * gg * bb\n face = frozenset([gg, aa_gg, gg_bb, aa_gg_bb])\n self.subgraph_0.add_edge(gg, face, sort=(aa, bb))\n self.subgraph_1.add_edge(aa_gg, face, sort=(~aa, bb))\n\n @classmethod\n def get_min_rank(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> Literal[0, 1, 2]:\n \"\"\"Minimum rank of a Cayley complex built out of the given generating data.\"\"\"\n if not CayleyComplex.satisfies_total_no_conjugacy(group, subset_a, subset_b):\n return 2\n graph_a, graph_b = CayleyComplex.get_cayley_graphs(group, subset_a, subset_b)\n if not nx.is_bipartite(graph_a) or not nx.is_bipartite(graph_b):\n return 1\n return 0\n\n @classmethod\n def satisfies_total_no_conjugacy(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> bool:\n \"\"\"Check the Total No-Conjugacy condition: aa gg != gg bb for all gg, aa, bb.\"\"\"\n return all(\n aa * gg != gg * bb\n for gg, aa, bb in itertools.product(group.generate(), subset_a, subset_b)\n )\n\n @classmethod\n def get_cayley_graphs(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> tuple[nx.Graph, nx.Graph]:\n \"\"\"Cayley graphs for the left- and right-acting subsets.\"\"\"\n edges_a = [(gg, aa * gg) for gg in group.generate() for aa in subset_a]\n edges_b = [(gg, gg * bb) for gg in group.generate() for bb in subset_b]\n return nx.Graph(edges_a), nx.Graph(edges_b)"
},
{
"identifier": "Node",
"path": "qldpc/objects.py",
"snippet": "class Node:\n \"\"\"Node in a Tanner graph.\n\n A node essentially an integer index, together with a boolean flag to distinguish \"data\" node\n from a \"check\" node in an error-correcting code.\n \"\"\"\n\n index: int\n is_data: bool = True\n\n def __hash__(self) -> int:\n return hash((self.index, self.is_data))\n\n def __lt__(self, other: Node) -> bool:\n if self.is_data == other.is_data:\n return self.index < other.index\n return self.is_data # data bits \"precede\" check bits\n\n def __str__(self) -> str:\n tag = \"d\" if self.is_data else \"c\"\n return f\"{tag}_{self.index}\""
},
{
"identifier": "Pauli",
"path": "qldpc/objects.py",
"snippet": "class Pauli(enum.Enum):\n \"\"\"Pauli operators.\"\"\"\n\n I = (0, 0) # noqa: E741\n Z = (0, 1)\n X = (1, 0)\n Y = (1, 1)\n\n def __mul__(self, other: Pauli) -> Pauli:\n \"\"\"Product of two Pauli operators.\"\"\"\n val_x = (self.value[0] + other.value[0]) % 2\n val_z = (self.value[1] + other.value[1]) % 2\n return Pauli((val_x, val_z))\n\n def __invert__(self) -> Pauli:\n \"\"\"Hadamard-transform this Pauli operator.\"\"\"\n return Pauli(self.value[::-1])\n\n def __str__(self) -> str:\n if self == Pauli.I:\n return \"I\"\n elif self == Pauli.Z:\n return \"Z\"\n elif self == Pauli.X:\n return \"X\"\n return \"Y\"\n\n @classmethod\n def from_string(cls, string: str) -> Pauli:\n \"\"\"Build a Pauli operator from a string.\"\"\"\n if string == \"I\":\n return Pauli.I\n elif string == \"Z\":\n return Pauli.Z\n elif string == \"X\":\n return Pauli.X\n elif string == \"Y\":\n return Pauli.Y\n raise ValueError(f\"Invalid Pauli operator: {string}\")\n\n @property\n def index(self) -> int:\n \"\"\"Numerical index for Pauli operators.\"\"\"\n if self == Pauli.X:\n return 0\n if self == Pauli.Z:\n return 1\n raise AttributeError(f\"No index for {self}.\")"
},
{
"identifier": "QuditOperator",
"path": "qldpc/objects.py",
"snippet": "class QuditOperator:\n \"\"\"A qudit operator of the form X(val_x)*Z(val_z).\"\"\"\n\n def __init__(self, value: tuple[int, int] = (0, 0)) -> None:\n self.value = value\n\n def __eq__(self, other: object) -> bool:\n return isinstance(other, QuditOperator) and self.value == other.value\n\n def __invert__(self) -> QuditOperator:\n \"\"\"Fourier-transform this qudit operator.\"\"\"\n return QuditOperator(self.value[::-1])\n\n def __str__(self) -> str:\n val_x, val_z = self.value\n if not val_x and not val_z:\n return \"I\"\n if val_x == val_z:\n return f\"Y({val_z})\"\n ops = []\n if val_x:\n ops.append(f\"X({val_x})\")\n if val_z:\n ops.append(f\"Z({val_z})\")\n return \"*\".join(ops)\n\n @classmethod\n def from_string(cls, string: str) -> QuditOperator:\n \"\"\"Build a qudit operator from its string representation.\"\"\"\n if string == \"I\":\n return QuditOperator((0, 0))\n\n invalid_op = f\"Invalid qudit operator: {string}\"\n\n val_x, val_z = 0, 0\n factors = string.split(\"*\")\n if len(factors) > 2:\n raise ValueError(invalid_op)\n\n for factor in factors:\n pauli = factor[0]\n val_str = factor[2:-1]\n _factor = f\"{pauli}({val_str})\"\n if pauli not in \"XYZ\" or not val_str.isnumeric() or factor != _factor:\n raise ValueError(invalid_op)\n\n val = int(val_str)\n if pauli == \"X\":\n val_x = val\n elif pauli == \"Z\":\n val_z = val\n else: # pauli == \"Y\"\n val_x = val_z = val\n\n return QuditOperator((val_x, val_z))"
}
] | import abc
import functools
import itertools
import cachetools
import galois
import ldpc.mod2
import networkx as nx
import numpy as np
import numpy.typing as npt
import qldpc
from collections.abc import Collection, Iterable, Sequence
from typing import TYPE_CHECKING, Literal
from qldpc import abstract
from qldpc.objects import CayleyComplex, Node, Pauli, QuditOperator
from typing_extensions import Self | 7,713 | Here:
- n is the number of data bits
- k is the number of encoded ("logical") bits
- d is the code distance
"""
return self.num_bits, self.dimension, self.get_distance()
@classmethod
def random(cls, bits: int, checks: int, field: int | None = None) -> ClassicalCode:
"""Construct a random classical code with the given number of bits and nontrivial checks."""
if field is None:
field = DEFAULT_FIELD_ORDER
code_field = galois.GF(field)
rows, cols = checks, bits
matrix = code_field.Random((rows, cols))
for row in range(matrix.shape[0]):
if not matrix[row, :].any():
matrix[row, np.random.randint(cols)] = code_field.Random(low=1) # pragma: no cover
for col in range(matrix.shape[1]):
if not matrix[:, col].any():
matrix[np.random.randint(rows), col] = code_field.Random(low=1) # pragma: no cover
return ClassicalCode(matrix, field)
@classmethod
def repetition(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code on the given number of bits."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits - 1, num_bits), dtype=int)
for row in range(num_bits - 1):
matrix[row, row] = 1
matrix[row, row + 1] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def ring(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code with periodic boundary conditions."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits, num_bits), dtype=int)
for row in range(num_bits):
matrix[row, row] = 1
matrix[row, (row + 1) % num_bits] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def hamming(cls, rank: int, field: int | None = None) -> ClassicalCode:
"""Construct a hamming code of a given rank."""
field = field or DEFAULT_FIELD_ORDER
if field == 2:
# parity check matrix: columns = all nonzero bitstrings
bitstrings = list(itertools.product([0, 1], repeat=rank))
return ClassicalCode(np.array(bitstrings[1:]).T)
# More generally, columns = maximal set of nonzero, linearly independent strings.
# This is achieved by collecting together all strings whose first nonzero element is a 1.
strings = [
(0,) * top_row + (1,) + rest
for top_row in range(rank - 1, -1, -1)
for rest in itertools.product(range(field), repeat=rank - top_row - 1)
]
return ClassicalCode(np.array(strings).T, field=field)
# TODO: add more codes, particularly from code families that are useful for good quantum codes
# see https://mhostetter.github.io/galois/latest/api/#forward-error-correction
# TODO:
# - add method to convert a parity check matrix into standard form
# - see https://arxiv.org/abs/1101.1519
# - one method to compute "blocks" of standard form, one to return the matrix itself
# - add is_CSS method to figure out whether this is a CSS Code
# - see https://quantumcomputing.stackexchange.com/questions/15432/
# - also compute and store sub-codes, if CSS
# - also add QuditCode.to_CSS() -> CSSCode
class QuditCode(AbstractCode):
"""Quantum stabilizer code for Galois qudits, with dimension q = p^m for prime p and integer m.
The parity check matrix of a QuditCode has dimensions (num_checks, 2 * num_qudits), and can be
written as a block matrix in the form H = [H_x|H_z]. Each block has num_qudits columns.
The entries H_x[c, d] = r_x and H_z[c, d] = r_z iff check c addresses qudit d with the operator
X(r_x) * Z(r_z), where r_x, r_z range over the base field, and X(r), Z(r) are generalized Pauli
operators. Specifically:
- X(r) = sum_{j=0}^{q-1} |j+r><j| is a shift operator, and
- Z(r) = sum_{j=0}^{q-1} w^{j r} |j><j| is a phase operator, with w = exp(2 pi i / q).
Warning: here j, r, s, etc. are not integers, but elements of the Galois field GF(q), which has
different rules for addition and multiplication when q is not a prime number.
Helpful lecture by Gottesman: https://www.youtube.com/watch?v=JWg4zrNAF-g
"""
@property
def num_checks(self) -> int:
"""Number of parity checks (stabilizers) in this code."""
return self.matrix.shape[0]
@property
def num_qudits(self) -> int:
"""Number of data qudits in this code."""
return self.matrix.shape[1] // 2
@property
def num_qubits(self) -> int:
"""Number of data qubits in this code."""
self._assert_qubit_code()
return self.num_qudits
def _assert_qubit_code(self) -> None:
if self._field_order != 2:
raise ValueError("Attempted to call a qubit-only method with a non-qubit code.")
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
graph = nx.DiGraph()
matrix = np.reshape(matrix, (len(matrix), 2, -1))
for row, col_xz, col in zip(*np.nonzero(matrix)):
node_check = Node(index=int(row), is_data=False)
node_qudit = Node(index=int(col), is_data=True)
graph.add_edge(node_check, node_qudit)
| """Error correction code constructions
Copyright 2023 The qLDPC Authors and Infleqtion Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
if TYPE_CHECKING:
DEFAULT_FIELD_ORDER = abstract.DEFAULT_FIELD_ORDER
################################################################################
# template error correction code classes
class AbstractCode(abc.ABC):
"""Template class for error-correcting codes."""
_field_order: int
def __init__(
self,
matrix: Self | npt.NDArray[np.int_] | Sequence[Sequence[int]],
field: int | None = None,
) -> None:
"""Construct a code from a parity check matrix over a finite field.
The base field is taken to be F_2 by default.
"""
self._matrix: galois.FieldArray
if isinstance(matrix, type(self)):
self._field_order = matrix.field.order
if not (field is None or field == self._field_order):
raise ValueError(
f"Field argument {field} is inconsistent with the given code, which is defined"
f" over F_{self._field_order}"
)
self._matrix = matrix.matrix
elif isinstance(matrix, galois.FieldArray):
self._field_order = type(matrix).order
self._matrix = matrix
else:
self._field_order = field or DEFAULT_FIELD_ORDER
self._matrix = self.field(np.array(matrix))
@property
def field(self) -> type[galois.FieldArray]:
"""Base field over which this code is defined."""
return galois.GF(self._field_order)
@property
def matrix(self) -> galois.FieldArray:
"""Parity check matrix of this code."""
return self._matrix
@functools.cached_property
def graph(self) -> nx.DiGraph:
"""Tanner graph of this code."""
return self.matrix_to_graph(self.matrix)
@classmethod
@abc.abstractmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
@classmethod
@abc.abstractmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
class ClassicalCode(AbstractCode):
"""Classical linear error-correcting code over a finite field F_q.
A classical code C = {x} is a set of vectors x (with entries in F_q) called code words.
We consider only linear codes, for which any linear combination of code words is also a code word.
Operationally, we define a classical code by a parity check matrix H with dimensions
(num_checks, num_bits). Each row of H represents a linear constraint (a "check") that code
words must satisfy. A vector x is a code word iff H @ x = 0.
"""
def __contains__(self, word: npt.NDArray[np.int_] | Sequence[int]) -> bool:
return not np.any(self.matrix @ self.field(word))
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix H into a Tanner graph.
The Tanner graph is a bipartite graph with (num_checks, num_bits) vertices, respectively
identified with the checks and bits of the code. The check vertex c and the bit vertex b
share an edge iff c addresses b; that is, edge (c, b) is in the graph iff H[c, b] != 0.
"""
graph = nx.DiGraph()
for row, col in zip(*np.nonzero(matrix)):
node_c = Node(index=int(row), is_data=False)
node_d = Node(index=int(col), is_data=True)
graph.add_edge(node_c, node_d, val=matrix[row][col])
if isinstance(matrix, galois.FieldArray):
graph.order = type(matrix).order
return graph
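# Illustrative example: for H = [[1, 1, 0], [0, 1, 1]] the Tanner graph has check nodes
# c_0, c_1 and data nodes d_0, d_1, d_2, with edges
# (c_0, d_0), (c_0, d_1), (c_1, d_1), (c_1, d_2), each carrying val = 1.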
@classmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
num_bits = sum(1 for node in graph.nodes() if node.is_data)
num_checks = len(graph.nodes()) - num_bits
field = graph.order if hasattr(graph, "order") else DEFAULT_FIELD_ORDER
matrix = galois.GF(field).Zeros((num_checks, num_bits))
for node_c, node_b, data in graph.edges(data=True):
matrix[node_c.index, node_b.index] = data.get("val", 1)
return matrix
@functools.cached_property
def generator(self) -> galois.FieldArray:
"""Generator of this code: a matrix whose rows for a basis for code words."""
return self.matrix.null_space()
def words(self) -> galois.FieldArray:
"""Code words of this code."""
vectors = itertools.product(self.field.elements, repeat=self.generator.shape[0])
return self.field(list(vectors)) @ self.generator
def get_random_word(self) -> galois.FieldArray:
"""Random code word: a sum all generators with random field coefficients."""
return self.field.Random(self.generator.shape[0]) @ self.generator
def dual(self) -> ClassicalCode:
"""Dual to this code.
The dual code ~C is the set of bitstrings orthogonal to C:
~C = { x : x @ y = 0 for all y in C }.
The parity check matrix of ~C is equal to the generator of C.
"""
return ClassicalCode(self.generator, self._field_order)
def __invert__(self) -> ClassicalCode:
return self.dual()
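# Illustrative example over F_2: the 3-bit repetition code has generator G = [[1, 1, 1]],
# so its dual ~C is the single-parity-check (even-weight) code, whose parity check matrix
# is [[1, 1, 1]].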
@classmethod
def tensor_product(cls, code_a: ClassicalCode, code_b: ClassicalCode) -> ClassicalCode:
"""Tensor product C_a ⊗ C_b of two codes C_a and C_b.
Let G_a and G_b respectively denote the generators C_a and C_b.
Definition: C_a ⊗ C_b is the code whose generators are G_a ⊗ G_b.
Observation: G_a ⊗ G_b is the check matrix of ~(C_a ⊗ C_b).
We therefore construct ~(C_a ⊗ C_b) and return its dual ~~(C_a ⊗ C_b) = C_a ⊗ C_b.
"""
if not code_a._field_order == code_b._field_order:
raise ValueError("Cannot take tensor product of codes over different fields")
gen_a: npt.NDArray[np.int_] = code_a.generator
gen_b: npt.NDArray[np.int_] = code_b.generator
return ~ClassicalCode(np.kron(gen_a, gen_b))
@property
def num_checks(self) -> int:
"""Number of check bits in this code."""
return self._matrix.shape[0]
@property
def num_bits(self) -> int:
"""Number of data bits in this code."""
return self._matrix.shape[1]
@functools.cached_property
def rank(self) -> int:
"""Rank of this code's parity check matrix.
Equivalently, the number of linearly independent parity checks in this code.
"""
if self._field_order == 2:
return ldpc.mod2.rank(self._matrix)
return np.linalg.matrix_rank(self._matrix)
@property
def dimension(self) -> int:
"""The number of logical bits encoded by this code."""
return self.num_bits - self.rank
@functools.cache
def get_distance(self) -> int:
"""The distance of this code, or equivalently the minimal weight of a nonzero code word."""
words = self.words().view(np.ndarray)
return np.min(np.count_nonzero(words[1:], axis=1))
def get_code_params(self) -> tuple[int, int, int]:
"""Compute the parameters of this code: [n,k,d].
Here:
- n is the number of data bits
- k is the number of encoded ("logical") bits
- d is the code distance
"""
return self.num_bits, self.dimension, self.get_distance()
@classmethod
def random(cls, bits: int, checks: int, field: int | None = None) -> ClassicalCode:
"""Construct a random classical code with the given number of bits and nontrivial checks."""
if field is None:
field = DEFAULT_FIELD_ORDER
code_field = galois.GF(field)
rows, cols = checks, bits
matrix = code_field.Random((rows, cols))
for row in range(matrix.shape[0]):
if not matrix[row, :].any():
matrix[row, np.random.randint(cols)] = code_field.Random(low=1) # pragma: no cover
for col in range(matrix.shape[1]):
if not matrix[:, col].any():
matrix[np.random.randint(rows), col] = code_field.Random(low=1) # pragma: no cover
return ClassicalCode(matrix, field)
@classmethod
def repetition(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code on the given number of bits."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits - 1, num_bits), dtype=int)
for row in range(num_bits - 1):
matrix[row, row] = 1
matrix[row, row + 1] = minus_one
return ClassicalCode(matrix, field)
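# Illustrative example: over GF(3) the characteristic is 3, so minus_one = 2 and
# repetition(3, field=3) yields
#     H = [[1, 2, 0],
#          [0, 1, 2]]
# i.e. each check enforces x_i - x_{i+1} = 0 (mod 3), so code words have all entries equal.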
@classmethod
def ring(cls, num_bits: int, field: int | None = None) -> ClassicalCode:
"""Construct a repetition code with periodic boundary conditions."""
minus_one = galois.GF(field or DEFAULT_FIELD_ORDER).characteristic - 1
matrix = np.zeros((num_bits, num_bits), dtype=int)
for row in range(num_bits):
matrix[row, row] = 1
matrix[row, (row + 1) % num_bits] = minus_one
return ClassicalCode(matrix, field)
@classmethod
def hamming(cls, rank: int, field: int | None = None) -> ClassicalCode:
"""Construct a hamming code of a given rank."""
field = field or DEFAULT_FIELD_ORDER
if field == 2:
# parity check matrix: columns = all nonzero bitstrings
bitstrings = list(itertools.product([0, 1], repeat=rank))
return ClassicalCode(np.array(bitstrings[1:]).T)
# More generally, columns = maximal set of nonzero, linearly independent strings.
# This is achieved by collecting together all strings whose first nonzero element is a 1.
strings = [
(0,) * top_row + (1,) + rest
for top_row in range(rank - 1, -1, -1)
for rest in itertools.product(range(field), repeat=rank - top_row - 1)
]
return ClassicalCode(np.array(strings).T, field=field)
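# Illustrative example: hamming(3) over the default binary field uses all 7 nonzero
# 3-bit strings as columns, giving a parity check matrix of shape (3, 7) -- the classical
# [7, 4, 3] Hamming code.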
# TODO: add more codes, particularly from code families that are useful for good quantum codes
# see https://mhostetter.github.io/galois/latest/api/#forward-error-correction
# TODO:
# - add method to convert a parity check matrix into standard form
# - see https://arxiv.org/abs/1101.1519
# - one method to compute "blocks" of standard form, one to return the matrix itself
# - add is_CSS method to figure out whether this is a CSS Code
# - see https://quantumcomputing.stackexchange.com/questions/15432/
# - also compute and store sub-codes, if CSS
# - also add QuditCode.to_CSS() -> CSSCode
class QuditCode(AbstractCode):
"""Quantum stabilizer code for Galois qudits, with dimension q = p^m for prime p and integer m.
The parity check matrix of a QuditCode has dimensions (num_checks, 2 * num_qudits), and can be
written as a block matrix in the form H = [H_x|H_z]. Each block has num_qudits columns.
The entries H_x[c, d] = r_x and H_z[c, d] = r_z iff check c addresses qudit d with the operator
X(r_x) * Z(r_z), where r_x, r_z range over the base field, and X(r), Z(r) are generalized Pauli
operators. Specifically:
- X(r) = sum_{j=0}^{q-1} |j+r><j| is a shift operator, and
- Z(r) = sum_{j=0}^{q-1} w^{j r} |j><j| is a phase operator, with w = exp(2 pi i / q).
Warning: here j, r, s, etc. are not integers, but elements of the Galois field GF(q), which has
different rules for addition and multiplication when q is not a prime number.
Helpful lecture by Gottesman: https://www.youtube.com/watch?v=JWg4zrNAF-g
"""
@property
def num_checks(self) -> int:
"""Number of parity checks (stabilizers) in this code."""
return self.matrix.shape[0]
@property
def num_qudits(self) -> int:
"""Number of data qudits in this code."""
return self.matrix.shape[1] // 2
@property
def num_qubits(self) -> int:
"""Number of data qubits in this code."""
self._assert_qubit_code()
return self.num_qudits
def _assert_qubit_code(self) -> None:
if self._field_order != 2:
raise ValueError("Attempted to call a qubit-only method with a non-qubit code.")
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
graph = nx.DiGraph()
matrix = np.reshape(matrix, (len(matrix), 2, -1))
for row, col_xz, col in zip(*np.nonzero(matrix)):
node_check = Node(index=int(row), is_data=False)
node_qudit = Node(index=int(col), is_data=True)
graph.add_edge(node_check, node_qudit)
| qudit_op = graph[node_check][node_qudit].get(QuditOperator, QuditOperator()) | 4 | 2023-12-19 22:29:42+00:00 | 12k |
amazon-science/c2f-seg | src/image_model.py | [
{
"identifier": "VQModel",
"path": "taming_src/taming_models.py",
"snippet": "class VQModel(nn.Module):\n def __init__(self, config):\n super(VQModel, self).__init__()\n self.config = config\n self.iteration = 0\n self.name = config.model_type\n self.m_path = os.path.join(config.path, self.name)\n self.eps = 1e-6\n\n self.ddconfig = config.model['params']['ddconfig']\n n_embed = config.model['params']['n_embed']\n embed_dim = config.model['params']['embed_dim']\n \n self.encoder = Encoder(self.ddconfig).to(config.device)\n self.decoder = Decoder(self.ddconfig).to(config.device)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25).to(config.device).to(config.device)\n self.quant_conv = torch.nn.Conv2d(self.ddconfig[\"z_channels\"], embed_dim, 1).to(config.device)\n # self.quant_proj = torch.nn.Linear(self.ddconfig[\"z_channels\"], embed_dim).to(config.device)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, self.ddconfig[\"z_channels\"], 1).to(config.device)\n # self.pose_quant_proj = torch.nn.Linear(embed_dim, self.ddconfig[\"z_channels\"]).to(config.device)\n\n def encode(self, x, mask=None):\n h = self.encoder(x) # dim=256\n h = self.quant_conv(h) # dim=256\n if mask is not None:\n mask = F.max_pool2d(mask, kernel_size=int(mask.shape[2] / h.shape[2]),\n stride=int(mask.shape[2] / h.shape[2]))\n quant = quant * mask + h * (1 - mask)\n quant, emb_loss, info = self.quantize(h, mask)\n \n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant) # dim: 256\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, x, mask=None):\n quant, diff, _ = self.encode(x, mask) # quant dim: 256\n\n dec = self.decode(quant)\n return dec, diff\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def restore(self, ckpt_file, g_opt=None, d_opt=None):\n torch_init_model(self, ckpt_file, \"state_dict\")\n saving = torch.load(ckpt_file, map_location='cpu')\n if 'optimizer_states' in saving and g_opt is not None and d_opt is not None:\n opt_state = saving['optimizer_states']\n g_opt.load_state_dict(opt_state[0])\n d_opt.load_state_dict(opt_state[1])\n print(f\"Restored from {ckpt_file}\")\n return g_opt, d_opt\n\n def save(self, prefix=None, g_opt=None, d_opt=None):\n if prefix is not None:\n save_path = self.m_path + \"_{}.pth\".format(prefix)\n else:\n save_path = self.m_path + \".pth\"\n\n print('\\nsaving {} {}...\\n'.format(self.name, prefix))\n all_saving = {'state_dict': self.state_dict(),\n 'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}\n torch.save(all_saving, save_path)"
},
{
"identifier": "MaskedTransformer",
"path": "src/image_component.py",
"snippet": "class MaskedTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n embedding_dim = config.n_embd\n num_embed = config.vocab_size+1\n self.conv_in = torch.nn.Conv2d(2048, embedding_dim//2, 3, padding=1)\n # z_embedding\n self.c_emb = nn.Embedding(num_embed, embedding_dim//4)\n self.z_emb = nn.Embedding(num_embed, embedding_dim//4)\n # posotion embedding\n self.pos_emb = nn.Embedding(config.sequence_length, embedding_dim)\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])\n # decoder head\n self.dec = Transformer_Prediction(config)\n # z dec and m dec\n self.m_dec = nn.Linear(embedding_dim, num_embed, bias=False)\n # self.m_dec.weight = self.m_emb.weight\n self.m_bias = nn.Parameter(torch.zeros(num_embed))\n\n self.sequence_length = config.sequence_length\n self.apply(self._init_weights)\n self.config = config\n\n def forward(self, img_feat, c_idx, z_idx, mask=None):\n # img_feat: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n i_embeddings = self.conv_in(img_feat) # [B, 768//2-1, 16, 16]\n i_embeddings = i_embeddings.flatten(2).transpose(-2, -1)\n # c and z embedding\n c_embeddings = self.c_emb(c_idx) # [B, 256, D//4]\n z_embeddings = self.z_emb(z_idx) # [B, 256, D//4]\n token_embeddings = torch.cat([i_embeddings, c_embeddings, z_embeddings], dim=2) # [B, 256, D]\n # add positional embeddings\n n_tokens = token_embeddings.shape[1] # 16 * 16\n position_ids = torch.arange(n_tokens, dtype=torch.long, device=z_idx.device)\n position_ids = position_ids.unsqueeze(0).repeat(z_idx.shape[0], 1) # [B, 256, 1]\n position_embeddings = self.pos_emb(position_ids) # [B, 256, D]\n\n x = self.drop(token_embeddings + position_embeddings)\n\n batch_size = token_embeddings.shape[0]\n mask = torch.ones(batch_size, 1, n_tokens, n_tokens).cuda()\n\n for block in self.blocks:\n x = block(x, mask=mask)\n\n x = self.dec(x)\n logits_m = self.m_dec(x) + self.m_bias\n \n return logits_m\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)"
},
{
"identifier": "Resnet_Encoder",
"path": "src/image_component.py",
"snippet": "class Resnet_Encoder(nn.Module):\n def __init__(self):\n super(Resnet_Encoder, self).__init__()\n self.encoder = base_resnet()\n\n def forward(self, img):\n features = self.encoder(img)\n return features"
},
{
"identifier": "Refine_Module",
"path": "src/image_component.py",
"snippet": "class Refine_Module(nn.Module):\n def __init__(self):\n super(Refine_Module, self).__init__()\n dim = 256 + 2\n self.conv_adapter = torch.nn.Conv2d(2048, 2048, 1)\n self.conv_in = torch.nn.Conv2d(2048, 256, 3, padding=1)\n self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)\n self.bn1 = torch.nn.BatchNorm2d(dim)\n\n self.lay2 = torch.nn.Conv2d(dim, 128, 3, padding=1)\n self.bn2 = torch.nn.BatchNorm2d(128)\n\n self.lay3 = torch.nn.Conv2d(128, 64, 3, padding=1)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.adapter1 = torch.nn.Conv2d(1024, 128, 1)\n\n # visible mask branch\n self.lay4_vm = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_vm = torch.nn.BatchNorm2d(32)\n self.lay5_vm = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_vm = torch.nn.BatchNorm2d(16)\n self.adapter2_vm = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_vm = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_vm = torch.nn.Conv2d(16, 1, 3, padding=1)\n\n # amodal mask branch\n self.lay4_am = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_am = torch.nn.BatchNorm2d(32)\n self.lay5_am = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_am = torch.nn.BatchNorm2d(16)\n self.adapter2_am = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_am = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_am = torch.nn.Conv2d(16, 1, 3, padding=1)\n \n def get_attn_map(self, feature, guidance):\n b,c,h,w = guidance.shape\n q = torch.flatten(guidance, start_dim=2)\n v = torch.flatten(feature, start_dim=2)\n\n k = v * q\n k = k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6)\n attn = (k.transpose(-2, -1) @ v) / 1\n attn = F.softmax(attn, dim=-1)\n attn = attn.reshape(b, c, h, w)\n return attn\n \n def forward(self, features, coarse_mask):\n # features: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n # coarse_mask: [B, 1, 256, 256]\n feat = self.conv_adapter(features[-1])\n coarse_mask = F.interpolate(coarse_mask, scale_factor=(1/16))\n attn_map = self.get_attn_map(feat, coarse_mask)\n x = self.conv_in(feat)\n x = torch.cat((x, attn_map, coarse_mask), dim=1)\n x = F.relu(self.bn1(self.lay1(x)))\n x = F.relu(self.bn2(self.lay2(x)))\n \n cur_feat = self.adapter1(features[-2])\n x = cur_feat + x\n x = F.interpolate(x, size=(32, 32), mode=\"nearest\")\n x = F.relu(self.bn3(self.lay3(x)))\n\n # TODO: visible mask branch\n cur_feat_vm = self.adapter2_vm(features[-3])\n x_vm = cur_feat_vm + x\n x_vm = F.interpolate(x_vm, size=(64, 64), mode=\"nearest\")\n x_vm = F.relu(self.bn4_vm(self.lay4_vm(x_vm)))\n\n cur_feat_vm = self.adapter3_vm(features[-4])\n x_vm = cur_feat_vm + x_vm\n x_vm = F.interpolate(x_vm, size=(128, 128), mode=\"nearest\")\n x_vm = F.relu(self.bn5_vm(self.lay5_vm(x_vm)))\n \n x_vm = self.out_lay_vm(x_vm)\n\n # TODO: full mask branch\n cur_feat_am = self.adapter2_am(features[-3])\n x_am = cur_feat_am + x\n x_am = F.interpolate(x_am, size=(64, 64), mode=\"nearest\")\n x_am = F.relu(self.bn4_am(self.lay4_am(x_am)))\n\n cur_feat_am = self.adapter3_am(features[-4])\n x_am = cur_feat_am + x_am\n x_am = F.interpolate(x_am, size=(128, 128), mode=\"nearest\")\n x_am = F.relu(self.bn5_am(self.lay5_am(x_am)))\n \n x_am = self.out_lay_am(x_am)\n\n return x_vm, x_am"
},
{
"identifier": "VGG19",
"path": "src/loss.py",
"snippet": "class VGG19(torch.nn.Module):\n def __init__(self, pretrained=True, vgg_norm=False):\n super(VGG19, self).__init__()\n self.vgg_norm = vgg_norm\n features = models.vgg19(pretrained=pretrained).features\n self.relu1_1 = torch.nn.Sequential()\n self.relu1_2 = torch.nn.Sequential()\n\n self.relu2_1 = torch.nn.Sequential()\n self.relu2_2 = torch.nn.Sequential()\n\n self.relu3_1 = torch.nn.Sequential()\n self.relu3_2 = torch.nn.Sequential()\n self.relu3_3 = torch.nn.Sequential()\n self.relu3_4 = torch.nn.Sequential()\n\n self.relu4_1 = torch.nn.Sequential()\n self.relu4_2 = torch.nn.Sequential()\n self.relu4_3 = torch.nn.Sequential()\n self.relu4_4 = torch.nn.Sequential()\n\n self.relu5_1 = torch.nn.Sequential()\n self.relu5_2 = torch.nn.Sequential()\n self.relu5_3 = torch.nn.Sequential()\n self.relu5_4 = torch.nn.Sequential()\n\n for x in range(2):\n self.relu1_1.add_module(str(x), features[x])\n\n for x in range(2, 4):\n self.relu1_2.add_module(str(x), features[x])\n\n for x in range(4, 7):\n self.relu2_1.add_module(str(x), features[x])\n\n for x in range(7, 9):\n self.relu2_2.add_module(str(x), features[x])\n\n for x in range(9, 12):\n self.relu3_1.add_module(str(x), features[x])\n\n for x in range(12, 14):\n self.relu3_2.add_module(str(x), features[x])\n\n for x in range(14, 16):\n self.relu3_3.add_module(str(x), features[x])\n\n for x in range(16, 18):\n self.relu3_4.add_module(str(x), features[x])\n\n for x in range(18, 21):\n self.relu4_1.add_module(str(x), features[x])\n\n for x in range(21, 23):\n self.relu4_2.add_module(str(x), features[x])\n\n for x in range(23, 25):\n self.relu4_3.add_module(str(x), features[x])\n\n for x in range(25, 27):\n self.relu4_4.add_module(str(x), features[x])\n\n for x in range(27, 30):\n self.relu5_1.add_module(str(x), features[x])\n\n for x in range(30, 32):\n self.relu5_2.add_module(str(x), features[x])\n\n for x in range(32, 34):\n self.relu5_3.add_module(str(x), features[x])\n\n for x in range(34, 36):\n self.relu5_4.add_module(str(x), features[x])\n\n # don't need the gradients, just want the features\n for param in self.parameters():\n param.requires_grad = False\n\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n\n def forward(self, x):\n if self.vgg_norm:\n x = (x + 1) / 2 # -1~1 --> 0~1\n # 由0~1重新归一化\n mean = torch.as_tensor(self.mean, dtype=x.dtype, device=x.device)\n std = torch.as_tensor(self.std, dtype=x.dtype, device=x.device)\n x.sub_(mean[None,:, None, None]).div_(std[None,:, None, None])\n\n relu1_1 = self.relu1_1(x)\n relu1_2 = self.relu1_2(relu1_1)\n\n relu2_1 = self.relu2_1(relu1_2)\n relu2_2 = self.relu2_2(relu2_1)\n\n relu3_1 = self.relu3_1(relu2_2)\n relu3_2 = self.relu3_2(relu3_1)\n relu3_3 = self.relu3_3(relu3_2)\n relu3_4 = self.relu3_4(relu3_3)\n\n relu4_1 = self.relu4_1(relu3_4)\n relu4_2 = self.relu4_2(relu4_1)\n relu4_3 = self.relu4_3(relu4_2)\n relu4_4 = self.relu4_4(relu4_3)\n\n relu5_1 = self.relu5_1(relu4_4)\n relu5_2 = self.relu5_2(relu5_1)\n relu5_3 = self.relu5_3(relu5_2)\n relu5_4 = self.relu5_4(relu5_3)\n\n out = {\n 'relu1_1': relu1_1,\n 'relu1_2': relu1_2,\n\n 'relu2_1': relu2_1,\n 'relu2_2': relu2_2,\n\n 'relu3_1': relu3_1,\n 'relu3_2': relu3_2,\n 'relu3_3': relu3_3,\n 'relu3_4': relu3_4,\n\n 'relu4_1': relu4_1,\n 'relu4_2': relu4_2,\n 'relu4_3': relu4_3,\n 'relu4_4': relu4_4,\n\n 'relu5_1': relu5_1,\n 'relu5_2': relu5_2,\n 'relu5_3': relu5_3,\n 'relu5_4': relu5_4,\n }\n return out"
},
{
"identifier": "PerceptualLoss",
"path": "src/loss.py",
"snippet": "class PerceptualLoss(nn.Module):\n r\"\"\"\n Perceptual loss, VGG-based\n https://arxiv.org/abs/1603.08155\n https://github.com/dxyang/StyleTransfer/blob/master/utils.py\n \"\"\"\n\n def __init__(self, vgg, weights=[1.0, 1.0, 1.0, 1.0, 1.0], reduction='mean'):\n super(PerceptualLoss, self).__init__()\n # self.add_module('vgg', VGG19())\n self.vgg = vgg\n self.reduction = reduction\n self.criterion = torch.nn.L1Loss(reduction=reduction)\n self.weights = weights\n\n def __call__(self, x, y):\n # Compute features\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n\n if self.reduction == 'mean':\n content_loss = 0.0\n content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])\n content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])\n content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])\n content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])\n content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])\n elif self.reduction == 'none':\n content_loss = []\n content_loss.append(self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']))\n content_loss.append(self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']))\n content_loss.append(self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']))\n content_loss.append(self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']))\n content_loss.append(self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']))\n else:\n raise NotImplementedError\n\n return content_loss"
},
{
"identifier": "AdamW",
"path": "utils/pytorch_optimization.py",
"snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n # exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(grad, alpha = 1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value = 1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n # p.data.addcdiv_(-step_size, exp_avg, denom)\n p.data.addcdiv_(exp_avg, denom, value = -step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha = -group[\"lr\"] * group[\"weight_decay\"])\n\n return loss"
},
{
"identifier": "get_linear_schedule_with_warmup",
"path": "utils/pytorch_optimization.py",
"snippet": "def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases linearly after\n linearly increasing during a warmup period.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)"
},
{
"identifier": "torch_show_all_params",
"path": "utils/utils.py",
"snippet": "def torch_show_all_params(model):\n params = list(model.parameters())\n k = 0\n for i in params:\n l = 1\n for j in i.size():\n l *= j\n k = k + l\n return k"
},
{
"identifier": "torch_init_model",
"path": "utils/utils.py",
"snippet": "def torch_init_model(model, init_checkpoint, key):\n state_dict = torch.load(init_checkpoint, map_location='cpu')[key]\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n load(model, prefix='')\n \n print(\"missing keys:{}\".format(missing_keys))\n print('unexpected keys:{}'.format(unexpected_keys))\n print('error msgs:{}'.format(error_msgs))"
},
{
"identifier": "Config",
"path": "utils/utils.py",
"snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')"
},
{
"identifier": "evaluation_image",
"path": "utils/evaluation.py",
"snippet": "def evaluation_image(frame_pred, frame_label, counts, meta, save_dict=None):\n frame_pred = (frame_pred > 0.5).to(torch.int64)\n frame_label = frame_label.to(torch.int64)\n counts = counts.to(torch.int64)\n vm_no_crop_gt = meta[\"vm_no_crop_gt\"].squeeze().unsqueeze(0).to(torch.int64)\n frame_pred = frame_pred.unsqueeze(0)\n frame_label = frame_label.unsqueeze(0)\n\n iou_ = get_IoU(frame_pred, frame_label)\n invisible_iou_= iou(frame_pred - vm_no_crop_gt, frame_label - vm_no_crop_gt)\n if (frame_label - vm_no_crop_gt).sum()==0:\n counts-=1\n return iou_.sum(), invisible_iou_, counts"
},
{
"identifier": "CrossEntropyLoss",
"path": "utils/loss.py",
"snippet": "class CrossEntropyLoss(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n - num_classes (int): number of classes\n - epsilon (float): weight\n - use_gpu (bool): whether to use gpu devices\n - label_smooth (bool): whether to apply label smoothing, if False, epsilon = 0\n \"\"\"\n def __init__(self, num_classes, epsilon=0.1, device=None, label_smooth=True):\n super(CrossEntropyLoss, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon if label_smooth else 0\n self.device = device\n if device is None:\n self.logsoftmax = nn.LogSoftmax(dim=1)\n else:\n self.logsoftmax = nn.LogSoftmax(dim=1).to(device)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n - inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n - targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.device is not None:\n targets = targets.to(self.device)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss"
}
] | import os
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torchvision import transforms
from taming_src.taming_models import VQModel
from src.image_component import MaskedTransformer, Resnet_Encoder, Refine_Module
from src.loss import VGG19, PerceptualLoss
from utils.pytorch_optimization import AdamW, get_linear_schedule_with_warmup
from utils.utils import torch_show_all_params, torch_init_model
from utils.utils import Config
from utils.evaluation import evaluation_image
from utils.loss import CrossEntropyLoss
from tqdm import tqdm | 7,680 |
class C2F_Seg(nn.Module):
def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}):
super(C2F_Seg, self).__init__()
self.config = config
self.iteration = 0
self.sample_iter = 0
self.name = config.model_type
# load g model for mask
self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset)))
self.g_path = os.path.join(g_path, self.g_config.model_type)
self.root_path = config.path
self.transformer_path = os.path.join(config.path, self.name)
self.mode = mode
self.save_eval_dict = save_eval_dict
self.eps = 1e-6
self.train_sample_iters = config.train_sample_iters
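        # build the sub-modules on the configured device: the VQ mask autoencoder ("g" model) and the ResNet image encoder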
self.g_model = VQModel(self.g_config).to(config.device)
self.img_encoder = Resnet_Encoder().to(config.device)
|
class C2F_Seg(nn.Module):
def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}):
super(C2F_Seg, self).__init__()
self.config = config
self.iteration = 0
self.sample_iter = 0
self.name = config.model_type
# load g model for mask
self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset)))
self.g_path = os.path.join(g_path, self.g_config.model_type)
self.root_path = config.path
self.transformer_path = os.path.join(config.path, self.name)
self.mode = mode
self.save_eval_dict = save_eval_dict
self.eps = 1e-6
self.train_sample_iters = config.train_sample_iters
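        # build the sub-modules on the configured device: the VQ mask autoencoder ("g" model) and the ResNet image encoder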
self.g_model = VQModel(self.g_config).to(config.device)
self.img_encoder = Resnet_Encoder().to(config.device) | self.refine_module = Refine_Module().to(config.device) | 3 | 2023-12-21 04:25:47+00:00 | 12k |
huahuahuage/Bert-VITS2-Speech | onnx_infer/text/chinese.py | [
{
"identifier": "punctuation",
"path": "onnx_infer/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "onnx_infer/text/chinese_tone_sandhi.py",
"snippet": "class ToneSandhi:\r\n def __init__(self):\r\n self.must_neural_tone_words = {\r\n \"麻烦\",\r\n \"麻利\",\r\n \"鸳鸯\",\r\n \"高粱\",\r\n \"骨头\",\r\n \"骆驼\",\r\n \"马虎\",\r\n \"首饰\",\r\n \"馒头\",\r\n \"馄饨\",\r\n \"风筝\",\r\n \"难为\",\r\n \"队伍\",\r\n \"阔气\",\r\n \"闺女\",\r\n \"门道\",\r\n \"锄头\",\r\n \"铺盖\",\r\n \"铃铛\",\r\n \"铁匠\",\r\n \"钥匙\",\r\n \"里脊\",\r\n \"里头\",\r\n \"部分\",\r\n \"那么\",\r\n \"道士\",\r\n \"造化\",\r\n \"迷糊\",\r\n \"连累\",\r\n \"这么\",\r\n \"这个\",\r\n \"运气\",\r\n \"过去\",\r\n \"软和\",\r\n \"转悠\",\r\n \"踏实\",\r\n \"跳蚤\",\r\n \"跟头\",\r\n \"趔趄\",\r\n \"财主\",\r\n \"豆腐\",\r\n \"讲究\",\r\n \"记性\",\r\n \"记号\",\r\n \"认识\",\r\n \"规矩\",\r\n \"见识\",\r\n \"裁缝\",\r\n \"补丁\",\r\n \"衣裳\",\r\n \"衣服\",\r\n \"衙门\",\r\n \"街坊\",\r\n \"行李\",\r\n \"行当\",\r\n \"蛤蟆\",\r\n \"蘑菇\",\r\n \"薄荷\",\r\n \"葫芦\",\r\n \"葡萄\",\r\n \"萝卜\",\r\n \"荸荠\",\r\n \"苗条\",\r\n \"苗头\",\r\n \"苍蝇\",\r\n \"芝麻\",\r\n \"舒服\",\r\n \"舒坦\",\r\n \"舌头\",\r\n \"自在\",\r\n \"膏药\",\r\n \"脾气\",\r\n \"脑袋\",\r\n \"脊梁\",\r\n \"能耐\",\r\n \"胳膊\",\r\n \"胭脂\",\r\n \"胡萝\",\r\n \"胡琴\",\r\n \"胡同\",\r\n \"聪明\",\r\n \"耽误\",\r\n \"耽搁\",\r\n \"耷拉\",\r\n \"耳朵\",\r\n \"老爷\",\r\n \"老实\",\r\n \"老婆\",\r\n \"老头\",\r\n \"老太\",\r\n \"翻腾\",\r\n \"罗嗦\",\r\n \"罐头\",\r\n \"编辑\",\r\n \"结实\",\r\n \"红火\",\r\n \"累赘\",\r\n \"糨糊\",\r\n \"糊涂\",\r\n \"精神\",\r\n \"粮食\",\r\n \"簸箕\",\r\n \"篱笆\",\r\n \"算计\",\r\n \"算盘\",\r\n \"答应\",\r\n \"笤帚\",\r\n \"笑语\",\r\n \"笑话\",\r\n \"窟窿\",\r\n \"窝囊\",\r\n \"窗户\",\r\n \"稳当\",\r\n \"稀罕\",\r\n \"称呼\",\r\n \"秧歌\",\r\n \"秀气\",\r\n \"秀才\",\r\n \"福气\",\r\n \"祖宗\",\r\n \"砚台\",\r\n \"码头\",\r\n \"石榴\",\r\n \"石头\",\r\n \"石匠\",\r\n \"知识\",\r\n \"眼睛\",\r\n \"眯缝\",\r\n \"眨巴\",\r\n \"眉毛\",\r\n \"相声\",\r\n \"盘算\",\r\n \"白净\",\r\n \"痢疾\",\r\n \"痛快\",\r\n \"疟疾\",\r\n \"疙瘩\",\r\n \"疏忽\",\r\n \"畜生\",\r\n \"生意\",\r\n \"甘蔗\",\r\n \"琵琶\",\r\n \"琢磨\",\r\n \"琉璃\",\r\n \"玻璃\",\r\n \"玫瑰\",\r\n \"玄乎\",\r\n \"狐狸\",\r\n \"状元\",\r\n \"特务\",\r\n \"牲口\",\r\n \"牙碜\",\r\n \"牌楼\",\r\n \"爽快\",\r\n \"爱人\",\r\n \"热闹\",\r\n \"烧饼\",\r\n \"烟筒\",\r\n \"烂糊\",\r\n \"点心\",\r\n \"炊帚\",\r\n \"灯笼\",\r\n \"火候\",\r\n \"漂亮\",\r\n \"滑溜\",\r\n \"溜达\",\r\n \"温和\",\r\n \"清楚\",\r\n \"消息\",\r\n \"浪头\",\r\n \"活泼\",\r\n \"比方\",\r\n \"正经\",\r\n \"欺负\",\r\n \"模糊\",\r\n \"槟榔\",\r\n \"棺材\",\r\n \"棒槌\",\r\n \"棉花\",\r\n \"核桃\",\r\n \"栅栏\",\r\n \"柴火\",\r\n \"架势\",\r\n \"枕头\",\r\n \"枇杷\",\r\n \"机灵\",\r\n \"本事\",\r\n \"木头\",\r\n \"木匠\",\r\n \"朋友\",\r\n \"月饼\",\r\n \"月亮\",\r\n \"暖和\",\r\n \"明白\",\r\n \"时候\",\r\n \"新鲜\",\r\n \"故事\",\r\n \"收拾\",\r\n \"收成\",\r\n \"提防\",\r\n \"挖苦\",\r\n \"挑剔\",\r\n \"指甲\",\r\n \"指头\",\r\n \"拾掇\",\r\n \"拳头\",\r\n \"拨弄\",\r\n \"招牌\",\r\n \"招呼\",\r\n \"抬举\",\r\n \"护士\",\r\n \"折腾\",\r\n \"扫帚\",\r\n \"打量\",\r\n \"打算\",\r\n \"打点\",\r\n \"打扮\",\r\n \"打听\",\r\n \"打发\",\r\n \"扎实\",\r\n \"扁担\",\r\n \"戒指\",\r\n \"懒得\",\r\n \"意识\",\r\n \"意思\",\r\n \"情形\",\r\n \"悟性\",\r\n \"怪物\",\r\n \"思量\",\r\n \"怎么\",\r\n \"念头\",\r\n \"念叨\",\r\n \"快活\",\r\n \"忙活\",\r\n \"志气\",\r\n \"心思\",\r\n \"得罪\",\r\n \"张罗\",\r\n \"弟兄\",\r\n \"开通\",\r\n \"应酬\",\r\n \"庄稼\",\r\n \"干事\",\r\n \"帮手\",\r\n \"帐篷\",\r\n \"希罕\",\r\n \"师父\",\r\n \"师傅\",\r\n \"巴结\",\r\n \"巴掌\",\r\n \"差事\",\r\n \"工夫\",\r\n \"岁数\",\r\n \"屁股\",\r\n \"尾巴\",\r\n \"少爷\",\r\n \"小气\",\r\n \"小伙\",\r\n \"将就\",\r\n \"对头\",\r\n \"对付\",\r\n \"寡妇\",\r\n \"家伙\",\r\n \"客气\",\r\n \"实在\",\r\n \"官司\",\r\n \"学问\",\r\n \"学生\",\r\n \"字号\",\r\n \"嫁妆\",\r\n \"媳妇\",\r\n \"媒人\",\r\n \"婆家\",\r\n \"娘家\",\r\n \"委屈\",\r\n \"姑娘\",\r\n \"姐夫\",\r\n \"妯娌\",\r\n \"妥当\",\r\n \"妖精\",\r\n \"奴才\",\r\n \"女婿\",\r\n \"头发\",\r\n \"太阳\",\r\n \"大爷\",\r\n \"大方\",\r\n \"大意\",\r\n \"大夫\",\r\n 
\"多少\",\r\n \"多么\",\r\n \"外甥\",\r\n \"壮实\",\r\n \"地道\",\r\n \"地方\",\r\n \"在乎\",\r\n \"困难\",\r\n \"嘴巴\",\r\n \"嘱咐\",\r\n \"嘟囔\",\r\n \"嘀咕\",\r\n \"喜欢\",\r\n \"喇嘛\",\r\n \"喇叭\",\r\n \"商量\",\r\n \"唾沫\",\r\n \"哑巴\",\r\n \"哈欠\",\r\n \"哆嗦\",\r\n \"咳嗽\",\r\n \"和尚\",\r\n \"告诉\",\r\n \"告示\",\r\n \"含糊\",\r\n \"吓唬\",\r\n \"后头\",\r\n \"名字\",\r\n \"名堂\",\r\n \"合同\",\r\n \"吆喝\",\r\n \"叫唤\",\r\n \"口袋\",\r\n \"厚道\",\r\n \"厉害\",\r\n \"千斤\",\r\n \"包袱\",\r\n \"包涵\",\r\n \"匀称\",\r\n \"勤快\",\r\n \"动静\",\r\n \"动弹\",\r\n \"功夫\",\r\n \"力气\",\r\n \"前头\",\r\n \"刺猬\",\r\n \"刺激\",\r\n \"别扭\",\r\n \"利落\",\r\n \"利索\",\r\n \"利害\",\r\n \"分析\",\r\n \"出息\",\r\n \"凑合\",\r\n \"凉快\",\r\n \"冷战\",\r\n \"冤枉\",\r\n \"冒失\",\r\n \"养活\",\r\n \"关系\",\r\n \"先生\",\r\n \"兄弟\",\r\n \"便宜\",\r\n \"使唤\",\r\n \"佩服\",\r\n \"作坊\",\r\n \"体面\",\r\n \"位置\",\r\n \"似的\",\r\n \"伙计\",\r\n \"休息\",\r\n \"什么\",\r\n \"人家\",\r\n \"亲戚\",\r\n \"亲家\",\r\n \"交情\",\r\n \"云彩\",\r\n \"事情\",\r\n \"买卖\",\r\n \"主意\",\r\n \"丫头\",\r\n \"丧气\",\r\n \"两口\",\r\n \"东西\",\r\n \"东家\",\r\n \"世故\",\r\n \"不由\",\r\n \"不在\",\r\n \"下水\",\r\n \"下巴\",\r\n \"上头\",\r\n \"上司\",\r\n \"丈夫\",\r\n \"丈人\",\r\n \"一辈\",\r\n \"那个\",\r\n \"菩萨\",\r\n \"父亲\",\r\n \"母亲\",\r\n \"咕噜\",\r\n \"邋遢\",\r\n \"费用\",\r\n \"冤家\",\r\n \"甜头\",\r\n \"介绍\",\r\n \"荒唐\",\r\n \"大人\",\r\n \"泥鳅\",\r\n \"幸福\",\r\n \"熟悉\",\r\n \"计划\",\r\n \"扑腾\",\r\n \"蜡烛\",\r\n \"姥爷\",\r\n \"照顾\",\r\n \"喉咙\",\r\n \"吉他\",\r\n \"弄堂\",\r\n \"蚂蚱\",\r\n \"凤凰\",\r\n \"拖沓\",\r\n \"寒碜\",\r\n \"糟蹋\",\r\n \"倒腾\",\r\n \"报复\",\r\n \"逻辑\",\r\n \"盘缠\",\r\n \"喽啰\",\r\n \"牢骚\",\r\n \"咖喱\",\r\n \"扫把\",\r\n \"惦记\",\r\n }\r\n self.must_not_neural_tone_words = {\r\n \"男子\",\r\n \"女子\",\r\n \"分子\",\r\n \"原子\",\r\n \"量子\",\r\n \"莲子\",\r\n \"石子\",\r\n \"瓜子\",\r\n \"电子\",\r\n \"人人\",\r\n \"虎虎\",\r\n }\r\n self.punc = \":,;。?!“”‘’':,;.?!\"\r\n\r\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\r\n # e.g.\r\n # word: \"家里\"\r\n # pos: \"s\"\r\n # finals: ['ia1', 'i3']\r\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\r\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\r\n for j, item in enumerate(word):\r\n if (\r\n j - 1 >= 0\r\n and item == word[j - 1]\r\n and pos[0] in {\"n\", \"v\", \"a\"}\r\n and word not in self.must_not_neural_tone_words\r\n ):\r\n finals[j] = finals[j][:-1] + \"5\"\r\n ge_idx = word.find(\"个\")\r\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n elif len(word) >= 1 and word[-1] in \"的地得\":\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # e.g. 走了, 看着, 去过\r\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\r\n # finals[-1] = finals[-1][:-1] + \"5\"\r\n elif (\r\n len(word) > 1\r\n and word[-1] in \"们子\"\r\n and pos in {\"r\", \"n\"}\r\n and word not in self.must_not_neural_tone_words\r\n ):\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # e.g. 桌上, 地下, 家里\r\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # e.g. 
上来, 下去\r\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n # 个做量词\r\n elif (\r\n ge_idx >= 1\r\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\r\n ) or word == \"个\":\r\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\r\n else:\r\n if (\r\n word in self.must_neural_tone_words\r\n or word[-2:] in self.must_neural_tone_words\r\n ):\r\n finals[-1] = finals[-1][:-1] + \"5\"\r\n\r\n word_list = self._split_word(word)\r\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\r\n for i, word in enumerate(word_list):\r\n # conventional neural in Chinese\r\n if (\r\n word in self.must_neural_tone_words\r\n or word[-2:] in self.must_neural_tone_words\r\n ):\r\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\r\n finals = sum(finals_list, [])\r\n return finals\r\n\r\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\r\n # e.g. 看不懂\r\n if len(word) == 3 and word[1] == \"不\":\r\n finals[1] = finals[1][:-1] + \"5\"\r\n else:\r\n for i, char in enumerate(word):\r\n # \"不\" before tone4 should be bu2, e.g. 不怕\r\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\r\n finals[i] = finals[i][:-1] + \"2\"\r\n return finals\r\n\r\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\r\n # \"一\" in number sequences, e.g. 一零零, 二一零\r\n if word.find(\"一\") != -1 and all(\r\n [item.isnumeric() for item in word if item != \"一\"]\r\n ):\r\n return finals\r\n # \"一\" between reduplication words should be yi5, e.g. 看一看\r\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\r\n finals[1] = finals[1][:-1] + \"5\"\r\n # when \"一\" is ordinal word, it should be yi1\r\n elif word.startswith(\"第一\"):\r\n finals[1] = finals[1][:-1] + \"1\"\r\n else:\r\n for i, char in enumerate(word):\r\n if char == \"一\" and i + 1 < len(word):\r\n # \"一\" before tone4 should be yi2, e.g. 一段\r\n if finals[i + 1][-1] == \"4\":\r\n finals[i] = finals[i][:-1] + \"2\"\r\n # \"一\" before non-tone4 should be yi4, e.g. 一天\r\n else:\r\n # \"一\" 后面如果是标点,还读一声\r\n if word[i + 1] not in self.punc:\r\n finals[i] = finals[i][:-1] + \"4\"\r\n return finals\r\n\r\n def _split_word(self, word: str) -> List[str]:\r\n word_list = jieba.cut_for_search(word)\r\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\r\n first_subword = word_list[0]\r\n first_begin_idx = word.find(first_subword)\r\n if first_begin_idx == 0:\r\n second_subword = word[len(first_subword) :]\r\n new_word_list = [first_subword, second_subword]\r\n else:\r\n second_subword = word[: -len(first_subword)]\r\n new_word_list = [second_subword, first_subword]\r\n return new_word_list\r\n\r\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\r\n if len(word) == 2 and self._all_tone_three(finals):\r\n finals[0] = finals[0][:-1] + \"2\"\r\n elif len(word) == 3:\r\n word_list = self._split_word(word)\r\n if self._all_tone_three(finals):\r\n # disyllabic + monosyllabic, e.g. 蒙古/包\r\n if len(word_list[0]) == 2:\r\n finals[0] = finals[0][:-1] + \"2\"\r\n finals[1] = finals[1][:-1] + \"2\"\r\n # monosyllabic + disyllabic, e.g. 纸/老虎\r\n elif len(word_list[0]) == 1:\r\n finals[1] = finals[1][:-1] + \"2\"\r\n else:\r\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\r\n if len(finals_list) == 2:\r\n for i, sub in enumerate(finals_list):\r\n # e.g. 所有/人\r\n if self._all_tone_three(sub) and len(sub) == 2:\r\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\r\n # e.g. 
好/喜欢\r\n elif (\r\n i == 1\r\n and not self._all_tone_three(sub)\r\n and finals_list[i][0][-1] == \"3\"\r\n and finals_list[0][-1][-1] == \"3\"\r\n ):\r\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\r\n finals = sum(finals_list, [])\r\n # split idiom into two words who's length is 2\r\n elif len(word) == 4:\r\n finals_list = [finals[:2], finals[2:]]\r\n finals = []\r\n for sub in finals_list:\r\n if self._all_tone_three(sub):\r\n sub[0] = sub[0][:-1] + \"2\"\r\n finals += sub\r\n\r\n return finals\r\n\r\n def _all_tone_three(self, finals: List[str]) -> bool:\r\n return all(x[-1] == \"3\" for x in finals)\r\n\r\n # merge \"不\" and the word behind it\r\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\r\n def __merge_bu(self, seg_list: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并'不'字,在jieba中'不'字单独出现可能会引起错误\r\n \"\"\"\r\n last_words = \"\"\r\n new_seg_list = []\r\n # 在分词列表中查找单独出现的'不'字\r\n for words, speech_part in seg_list:\r\n if last_words == \"不\" and words != \"不\":\r\n words = last_words + words\r\n if words != \"不\":\r\n new_seg_list.append((words, speech_part))\r\n last_words = words\r\n if last_words == \"不\":\r\n new_seg_list.append((last_words, \"d\"))\r\n return new_seg_list\r\n\r\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. \"听\",\"一\",\"听\" ->\"听一听\"\r\n # function 2: merge single \"一\" and the word behind it\r\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\r\n # e.g.\r\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\r\n # output seg: [['听一听', 'v']]\r\n def __merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并'一'字,在jieba中'不'字单独出现可能会引起错误\r\n \"\"\"\r\n new_seg = []\r\n # function 1\r\n for i, (word, pos) in enumerate(seg):\r\n if (\r\n i - 1 >= 0\r\n and word == \"一\"\r\n and i + 1 < len(seg)\r\n and seg[i - 1][0] == seg[i + 1][0]\r\n and seg[i - 1][1] == \"v\"\r\n ):\r\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\r\n else:\r\n if (\r\n i - 2 >= 0\r\n and seg[i - 1][0] == \"一\"\r\n and seg[i - 2][0] == word\r\n and pos == \"v\"\r\n ):\r\n continue\r\n else:\r\n new_seg.append([word, pos])\r\n seg = new_seg\r\n new_seg = []\r\n # function 2\r\n for i, (word, pos) in enumerate(seg):\r\n if new_seg and new_seg[-1][0] == \"一\":\r\n new_seg[-1][0] = new_seg[-1][0] + word\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n # the first and the second words are all_tone_three\r\n def __merge_continuous_three_tones(\r\n self, seg: List[Tuple[str, str]]\r\n ) -> List[Tuple[str, str]]:\r\n new_seg = []\r\n sub_finals_list = [\r\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\r\n for (word, pos) in seg\r\n ]\r\n assert len(sub_finals_list) == len(seg)\r\n merge_last = [False] * len(seg)\r\n for i, (word, pos) in enumerate(seg):\r\n if (\r\n i - 1 >= 0\r\n and self._all_tone_three(sub_finals_list[i - 1])\r\n and self._all_tone_three(sub_finals_list[i])\r\n and not merge_last[i - 1]\r\n ):\r\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\r\n if (\r\n not self._is_reduplication(seg[i - 1][0])\r\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\r\n ):\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n merge_last[i] = True\r\n else:\r\n new_seg.append([word, pos])\r\n else:\r\n new_seg.append([word, pos])\r\n\r\n return new_seg\r\n\r\n def _is_reduplication(self, word: str) -> 
bool:\r\n return len(word) == 2 and word[0] == word[1]\r\n\r\n # the last char of first word and the first char of second word is tone_three\r\n def __merge_continuous_three_tones_2(\r\n self, seg: List[Tuple[str, str]]\r\n ) -> List[Tuple[str, str]]:\r\n new_seg = []\r\n sub_finals_list = [\r\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\r\n for (word, pos) in seg\r\n ]\r\n assert len(sub_finals_list) == len(seg)\r\n merge_last = [False] * len(seg)\r\n for i, (word, pos) in enumerate(seg):\r\n if (\r\n i - 1 >= 0\r\n and sub_finals_list[i - 1][-1][-1] == \"3\"\r\n and sub_finals_list[i][0][-1] == \"3\"\r\n and not merge_last[i - 1]\r\n ):\r\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\r\n if (\r\n not self._is_reduplication(seg[i - 1][0])\r\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\r\n ):\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n merge_last[i] = True\r\n else:\r\n new_seg.append([word, pos])\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n def __merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并'儿'话\r\n \"\"\"\r\n new_seg = []\r\n for i, (word, pos) in enumerate(seg):\r\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n def __merge_reduplication(\r\n self, seg: List[Tuple[str, str]]\r\n ) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 合并单独的叠词\r\n \"\"\"\r\n new_seg = []\r\n for i, (word, pos) in enumerate(seg):\r\n if new_seg and word == new_seg[-1][0]:\r\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\r\n else:\r\n new_seg.append([word, pos])\r\n return new_seg\r\n\r\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\r\n \"\"\"\r\n 自定义jieba分词合并处理\r\n\r\n 输入: jieba分词结果列表\r\n \"\"\"\r\n seg = self.__merge_bu(seg)\r\n try:\r\n seg = self.__merge_yi(seg)\r\n except:\r\n log_instance.warning(\"jieba中文分词:合并'一'字失败\")\r\n # print(\"jieba中文分词:合并'一'字失败\")\r\n log_instance.debug(\"jieba中文分词:合并相同的字词\")\r\n seg = self.__merge_reduplication(seg)\r\n log_instance.debug(seg)\r\n seg = self.__merge_continuous_three_tones(seg)\r\n seg = self.__merge_continuous_three_tones_2(seg)\r\n seg = self.__merge_er(seg)\r\n return seg\r\n\r\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\r\n finals = self._bu_sandhi(word, finals)\r\n finals = self._yi_sandhi(word, finals)\r\n finals = self._neural_sandhi(word, pos, finals)\r\n finals = self._three_sandhi(word, finals)\r\n return finals\r"
},
{
"identifier": "log_instance",
"path": "log.py",
"snippet": "DISABLED_LOGGER = [\"gradio.processing_utils\", \"gradio\", \"httpx\"]\r"
}
] | import os
import re
import cn2an
import jieba.posseg as psg
from typing import List, Dict
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .chinese_tone_sandhi import ToneSandhi
from log import log_instance
| 9,001 | f = open("onnx/Text/opencpop-strict.txt", "r")
for line in f.readlines():
self.pinyin_to_symbol_map[line.split("\t")[0]] = line.strip().split("\t")[1]
f.close()
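    # split a word into pinyin initials and finals (FINALS_TONE3 style) via pypinyin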
@staticmethod
def __get_initials_finals(word):
initials = []
finals = []
orig_initials = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.INITIALS
)
orig_finals = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
)
for c, v in zip(orig_initials, orig_finals):
initials.append(c)
finals.append(v)
return initials, finals
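    # convert pre-split text segments into flat phone/tone lists plus per-character phone counts (word2ph)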
def g2p(self, segments_list: List[str]):
phones_list = []
tones_list = []
word2ph = []
for seg in segments_list:
seg_cut = psg.lcut(seg)
initials = []
finals = []
seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
for word, pos in seg_cut:
if pos == "eng":
continue
sub_initials, sub_finals = self.__get_initials_finals(word)
sub_finals = self.tone_modifier.modified_tone(word, pos, sub_finals)
initials.append(sub_initials)
finals.append(sub_finals)
# assert len(sub_initials) == len(sub_finals) == len(word)
initials = sum(initials, [])
finals = sum(finals, [])
            # map each (initial, final) pair to phoneme symbols and a tone
for c, v in zip(initials, finals):
raw_pinyin = c + v
# NOTE: post process for pypinyin outputs
# we discriminate i, ii and iii
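                # c == v happens when pypinyin passes a punctuation character through unchanged (verified by the assert below)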
if c == v:
assert c in punctuation
phone = [c]
tone = "0"
word2ph.append(1)
else:
v_without_tone = v[:-1]
tone = v[-1]
pinyin = c + v_without_tone
assert tone in "12345"
if c:
                        # multi-syllable case: the pinyin has an initial consonant
v_rep_map = {
"uei": "ui",
"iou": "iu",
"uen": "un",
}
if v_without_tone in v_rep_map.keys():
pinyin = c + v_rep_map[v_without_tone]
else:
                        # single syllable: final only, no initial
pinyin_rep_map = {
"ing": "ying",
"i": "yi",
"in": "yin",
"u": "wu",
}
if pinyin in pinyin_rep_map.keys():
pinyin = pinyin_rep_map[pinyin]
else:
single_rep_map = {
"v": "yu",
"e": "e",
"i": "y",
"u": "w",
}
if pinyin[0] in single_rep_map.keys():
pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
assert pinyin in self.pinyin_to_symbol_map.keys(), (
pinyin,
seg,
raw_pinyin,
)
phone = self.pinyin_to_symbol_map[pinyin].split(" ")
word2ph.append(len(phone))
phones_list += phone
tones_list += [int(tone)] * len(phone)
return phones_list, tones_list, word2ph
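# module-level singleton: reuses the loaded pinyin-to-symbol map and ToneSandhi state across g2p() calls
# illustrative call (assuming the usual return signature; normalized_text is a hypothetical variable):
#   phones, tones, word2ph = g2p(normalized_text)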
chinese_g2p_instance = ChineseG2P()
def g2p(text: str):
"""
    Convert the text into syllables (phonemes).
"""
    # split the text into a list of segments at punctuation marks
pattern = r"(?<=[{0}])\s*".format("".join(punctuation))
sentences = [i for i in re.split(pattern, text) if i.strip() != ""]
    # from the split segments, build the pronunciation sequences for the text
    # phone: pinyin initials and finals
    # tone: tone values 1 2 3 4 5
    # word2ph: 1 if the syllable has only a final, 2 if it has both an initial and a final
phones_list, tones_list, word2ph_list = chinese_g2p_instance.g2p(sentences)
    if sum(word2ph_list) != len(phones_list):
        raise ValueError("Chinese-to-pinyin conversion failed: the phone total (sum(word2ph_list)) does not match the number of phones (len(phones_list)).")
    if len(word2ph_list) != len(text):  # Sometimes it will crash, you can add a try-catch.
        raise ValueError("Chinese-to-pinyin conversion failed: the number of pinyin results (len(word2ph_list)) does not match the text length (len(text)).")
phones_list = ["_"] + phones_list + ["_"]
|
REP_MAP = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
'"': "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
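    # REP_MAP above maps full-width / Chinese punctuation to plain ASCII equivalents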
class ChineseG2P:
def __init__(self) -> None:
self.tone_modifier = ToneSandhi()
self.pinyin_to_symbol_map: Dict[str, str] = {}
self.__read_opencpop_symbol_map()
def __read_opencpop_symbol_map(self):
"""
        Read the opencpop data (pinyin-to-symbol map).
"""
f = open("onnx/Text/opencpop-strict.txt", "r")
for line in f.readlines():
self.pinyin_to_symbol_map[line.split("\t")[0]] = line.strip().split("\t")[1]
f.close()
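    # split a word into pinyin initials and finals (FINALS_TONE3 style) via pypinyin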
@staticmethod
def __get_initials_finals(word):
initials = []
finals = []
orig_initials = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.INITIALS
)
orig_finals = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
)
for c, v in zip(orig_initials, orig_finals):
initials.append(c)
finals.append(v)
return initials, finals
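    # convert pre-split text segments into flat phone/tone lists plus per-character phone counts (word2ph)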
def g2p(self, segments_list: List[str]):
phones_list = []
tones_list = []
word2ph = []
for seg in segments_list:
seg_cut = psg.lcut(seg)
initials = []
finals = []
seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
for word, pos in seg_cut:
if pos == "eng":
continue
sub_initials, sub_finals = self.__get_initials_finals(word)
sub_finals = self.tone_modifier.modified_tone(word, pos, sub_finals)
initials.append(sub_initials)
finals.append(sub_finals)
# assert len(sub_initials) == len(sub_finals) == len(word)
initials = sum(initials, [])
finals = sum(finals, [])
            # map each (initial, final) pair to phoneme symbols and a tone
for c, v in zip(initials, finals):
raw_pinyin = c + v
# NOTE: post process for pypinyin outputs
# we discriminate i, ii and iii
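                # c == v happens when pypinyin passes a punctuation character through unchanged (verified by the assert below)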
if c == v:
assert c in punctuation
phone = [c]
tone = "0"
word2ph.append(1)
else:
v_without_tone = v[:-1]
tone = v[-1]
pinyin = c + v_without_tone
assert tone in "12345"
if c:
                        # multi-syllable case: the pinyin has an initial consonant
v_rep_map = {
"uei": "ui",
"iou": "iu",
"uen": "un",
}
if v_without_tone in v_rep_map.keys():
pinyin = c + v_rep_map[v_without_tone]
else:
                        # single syllable: final only, no initial
pinyin_rep_map = {
"ing": "ying",
"i": "yi",
"in": "yin",
"u": "wu",
}
if pinyin in pinyin_rep_map.keys():
pinyin = pinyin_rep_map[pinyin]
else:
single_rep_map = {
"v": "yu",
"e": "e",
"i": "y",
"u": "w",
}
if pinyin[0] in single_rep_map.keys():
pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
assert pinyin in self.pinyin_to_symbol_map.keys(), (
pinyin,
seg,
raw_pinyin,
)
phone = self.pinyin_to_symbol_map[pinyin].split(" ")
word2ph.append(len(phone))
phones_list += phone
tones_list += [int(tone)] * len(phone)
return phones_list, tones_list, word2ph
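# module-level singleton: reuses the loaded pinyin-to-symbol map and ToneSandhi state across g2p() calls
# illustrative call (assuming the usual return signature; normalized_text is a hypothetical variable):
#   phones, tones, word2ph = g2p(normalized_text)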
chinese_g2p_instance = ChineseG2P()
def g2p(text: str):
"""
    Convert the text into syllables (phonemes).
"""
    # split the text into a list of segments at punctuation marks
pattern = r"(?<=[{0}])\s*".format("".join(punctuation))
sentences = [i for i in re.split(pattern, text) if i.strip() != ""]
    # from the split segments, build the pronunciation sequences for the text
    # phone: pinyin initials and finals
    # tone: tone values 1 2 3 4 5
    # word2ph: 1 if the syllable has only a final, 2 if it has both an initial and a final
phones_list, tones_list, word2ph_list = chinese_g2p_instance.g2p(sentences)
    if sum(word2ph_list) != len(phones_list):
        raise ValueError("Chinese-to-pinyin conversion failed: the phone total (sum(word2ph_list)) does not match the number of phones (len(phones_list)).")
    if len(word2ph_list) != len(text):  # Sometimes it will crash, you can add a try-catch.
        raise ValueError("Chinese-to-pinyin conversion failed: the number of pinyin results (len(word2ph_list)) does not match the text length (len(text)).")
phones_list = ["_"] + phones_list + ["_"]
| log_instance.debug(f"phones {str(phones_list)}")
| 2 | 2023-12-21 13:50:50+00:00 | 12k |
lipku/metahuman-stream | main.py | [
{
"identifier": "NeRFDataset",
"path": "nerf_triplane/provider.py",
"snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n self.downscale = downscale\n self.root_path = opt.path\n self.preload = opt.preload # 0 = disk, 1 = cpu, 2 = gpu\n self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.\n self.offset = opt.offset # camera offset\n self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.\n self.fp16 = opt.fp16\n\n self.start_index = opt.data_range[0]\n self.end_index = opt.data_range[1]\n\n self.training = self.type in ['train', 'all', 'trainval']\n self.num_rays = self.opt.num_rays if self.training else -1\n\n # load nerf-compatible format data.\n \n with open(opt.pose, 'r') as f:\n transform = json.load(f)\n\n # load image size\n if 'h' in transform and 'w' in transform:\n self.H = int(transform['h']) // downscale\n self.W = int(transform['w']) // downscale\n else:\n self.H = int(transform['cy']) * 2 // downscale\n self.W = int(transform['cx']) * 2 // downscale\n \n # read images\n frames = transform[\"frames\"]\n\n # use a slice of the dataset\n if self.end_index == -1: # abuse...\n self.end_index = len(frames)\n\n frames = frames[self.start_index:self.end_index]\n print(f'[INFO] load {len(frames)} {type} frames.')\n\n # only load pre-calculated aud features when not live-streaming\n if not self.opt.asr:\n\n # empty means the default self-driven extracted features.\n if self.opt.aud == '':\n if 'esperanto' in self.opt.asr_model:\n aud_features = np.load(os.path.join(self.root_path, 'aud_eo.npy'))\n elif 'deepspeech' in self.opt.asr_model:\n aud_features = np.load(os.path.join(self.root_path, 'aud_ds.npy'))\n else:\n aud_features = np.load(os.path.join(self.root_path, 'aud.npy'))\n # cross-driven extracted features. 
\n else:\n aud_features = np.load(self.opt.aud)\n\n aud_features = torch.from_numpy(aud_features)\n\n # support both [N, 16] labels and [N, 16, K] logits\n if len(aud_features.shape) == 3:\n aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16] \n\n if self.opt.emb:\n print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')\n aud_features = aud_features.argmax(1) # [N, 16]\n \n else:\n assert self.opt.emb, \"aud only provide labels, must use --emb\"\n aud_features = aud_features.long()\n\n print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')\n\n # load action units\n import pandas as pd\n au_blink_info=pd.read_csv(os.path.join(self.root_path, 'au.csv'))\n au_blink = au_blink_info[' AU45_r'].values\n\n self.torso_img = []\n self.images = []\n\n self.poses = []\n self.exps = []\n\n self.auds = []\n self.face_rect = []\n self.lhalf_rect = []\n self.lips_rect = []\n self.eye_area = []\n self.eye_rect = []\n\n for f in tqdm.tqdm(frames, desc=f'Loading {type} data'):\n\n f_path = os.path.join(self.root_path, 'gt_imgs', str(f['img_id']) + '.jpg')\n\n if not os.path.exists(f_path):\n print('[WARN]', f_path, 'NOT FOUND!')\n continue\n \n pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]\n pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)\n self.poses.append(pose)\n\n if self.preload > 0:\n image = cv2.imread(f_path, cv2.IMREAD_UNCHANGED) # [H, W, 3] o [H, W, 4]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.images.append(image)\n else:\n self.images.append(f_path)\n\n # load frame-wise bg\n \n torso_img_path = os.path.join(self.root_path, 'torso_imgs', str(f['img_id']) + '.png')\n\n if self.preload > 0:\n torso_img = cv2.imread(torso_img_path, cv2.IMREAD_UNCHANGED) # [H, W, 4]\n torso_img = cv2.cvtColor(torso_img, cv2.COLOR_BGRA2RGBA)\n torso_img = torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.torso_img.append(torso_img)\n else:\n self.torso_img.append(torso_img_path)\n\n # find the corresponding audio to the image frame\n if not self.opt.asr and self.opt.aud == '':\n aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...\n self.auds.append(aud)\n\n # load lms and extract face\n lms = np.loadtxt(os.path.join(self.root_path, 'ori_imgs', str(f['img_id']) + '.lms')) # [68, 2]\n\n lh_xmin, lh_xmax = int(lms[31:36, 1].min()), int(lms[:, 1].max()) # actually lower half area\n xmin, xmax = int(lms[:, 1].min()), int(lms[:, 1].max())\n ymin, ymax = int(lms[:, 0].min()), int(lms[:, 0].max())\n self.face_rect.append([xmin, xmax, ymin, ymax])\n self.lhalf_rect.append([lh_xmin, lh_xmax, ymin, ymax])\n\n if self.opt.exp_eye:\n # eyes_left = slice(36, 42)\n # eyes_right = slice(42, 48)\n\n # area_left = polygon_area(lms[eyes_left, 0], lms[eyes_left, 1])\n # area_right = polygon_area(lms[eyes_right, 0], lms[eyes_right, 1])\n\n # # area percentage of two eyes of the whole image...\n # area = (area_left + area_right) / (self.H * self.W) * 100\n\n # action units blink AU45\n area = au_blink[f['img_id']]\n area = np.clip(area, 0, 2) / 2\n # area = area + np.random.rand() / 10\n self.eye_area.append(area)\n\n xmin, xmax = int(lms[36:48, 1].min()), int(lms[36:48, 1].max())\n ymin, ymax = int(lms[36:48, 0].min()), int(lms[36:48, 0].max())\n self.eye_rect.append([xmin, xmax, ymin, ymax])\n\n if self.opt.finetune_lips:\n lips = slice(48, 60)\n xmin, xmax = int(lms[lips, 1].min()), int(lms[lips, 
1].max())\n ymin, ymax = int(lms[lips, 0].min()), int(lms[lips, 0].max())\n\n # padding to H == W\n cx = (xmin + xmax) // 2\n cy = (ymin + ymax) // 2\n\n l = max(xmax - xmin, ymax - ymin) // 2\n xmin = max(0, cx - l)\n xmax = min(self.H, cx + l)\n ymin = max(0, cy - l)\n ymax = min(self.W, cy + l)\n\n self.lips_rect.append([xmin, xmax, ymin, ymax])\n \n # load pre-extracted background image (should be the same size as training image...)\n\n if self.opt.bg_img == 'white': # special\n bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)\n elif self.opt.bg_img == 'black': # special\n bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)\n else: # load from file\n # default bg\n if self.opt.bg_img == '':\n self.opt.bg_img = os.path.join(self.root_path, 'bc.jpg')\n bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]\n if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:\n bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)\n bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)\n bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.bg_img = bg_img\n\n self.poses = np.stack(self.poses, axis=0)\n\n # smooth camera path...\n if self.opt.smooth_path:\n self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)\n \n self.poses = torch.from_numpy(self.poses) # [N, 4, 4]\n\n if self.preload > 0:\n self.images = torch.from_numpy(np.stack(self.images, axis=0)) # [N, H, W, C]\n self.torso_img = torch.from_numpy(np.stack(self.torso_img, axis=0)) # [N, H, W, C]\n else:\n self.images = np.array(self.images)\n self.torso_img = np.array(self.torso_img)\n\n if self.opt.asr:\n # live streaming, no pre-calculated auds\n self.auds = None\n else:\n # auds corresponding to images\n if self.opt.aud == '':\n self.auds = torch.stack(self.auds, dim=0) # [N, 32, 16]\n # auds is novel, may have a different length with images\n else:\n self.auds = aud_features\n \n self.bg_img = torch.from_numpy(self.bg_img)\n\n if self.opt.exp_eye:\n self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]\n print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')\n\n if self.opt.smooth_eye:\n\n # naive 5 window average\n ori_eye = self.eye_area.copy()\n for i in range(ori_eye.shape[0]):\n start = max(0, i - 1)\n end = min(ori_eye.shape[0], i + 2)\n self.eye_area[i] = ori_eye[start:end].mean()\n\n self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]\n\n \n # calculate mean radius of all camera poses\n self.radius = self.poses[:, :3, 3].norm(dim=-1).mean(0).item()\n #print(f'[INFO] dataset camera poses: radius = {self.radius:.4f}, bound = {self.bound}')\n\n \n # [debug] uncomment to view all training poses.\n # visualize_poses(self.poses.numpy())\n\n # [debug] uncomment to view examples of randomly generated poses.\n # visualize_poses(rand_poses(100, self.device, radius=self.radius).cpu().numpy())\n\n if self.preload > 1:\n self.poses = self.poses.to(self.device)\n\n if self.auds is not None:\n self.auds = self.auds.to(self.device)\n\n self.bg_img = self.bg_img.to(torch.half).to(self.device)\n\n self.torso_img = self.torso_img.to(torch.half).to(self.device)\n self.images = self.images.to(torch.half).to(self.device)\n \n if self.opt.exp_eye:\n self.eye_area = self.eye_area.to(self.device)\n\n # load intrinsics\n if 'focal_len' in transform:\n fl_x = fl_y = transform['focal_len']\n elif 'fl_x' in transform or 'fl_y' in transform:\n fl_x = (transform['fl_x'] if 'fl_x' in transform else transform['fl_y']) / downscale\n fl_y = 
(transform['fl_y'] if 'fl_y' in transform else transform['fl_x']) / downscale\n elif 'camera_angle_x' in transform or 'camera_angle_y' in transform:\n # blender, assert in radians. already downscaled since we use H/W\n fl_x = self.W / (2 * np.tan(transform['camera_angle_x'] / 2)) if 'camera_angle_x' in transform else None\n fl_y = self.H / (2 * np.tan(transform['camera_angle_y'] / 2)) if 'camera_angle_y' in transform else None\n if fl_x is None: fl_x = fl_y\n if fl_y is None: fl_y = fl_x\n else:\n raise RuntimeError('Failed to load focal length, please check the transforms.json!')\n\n cx = (transform['cx'] / downscale) if 'cx' in transform else (self.W / 2)\n cy = (transform['cy'] / downscale) if 'cy' in transform else (self.H / 2)\n \n self.intrinsics = np.array([fl_x, fl_y, cx, cy])\n\n # directly build the coordinate meshgrid in [-1, 1]^2\n self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]\n\n\n def mirror_index(self, index):\n size = self.poses.shape[0]\n turn = index // size\n res = index % size\n if turn % 2 == 0:\n return res\n else:\n return size - res - 1\n\n\n def collate(self, index):\n\n B = len(index) # a list of length 1\n # assert B == 1\n\n results = {}\n\n # audio use the original index\n if self.auds is not None:\n auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device)\n results['auds'] = auds\n\n # head pose and bg image may mirror (replay --> <-- --> <--).\n index[0] = self.mirror_index(index[0])\n\n poses = self.poses[index].to(self.device) # [B, 4, 4]\n \n if self.training and self.opt.finetune_lips:\n rect = self.lips_rect[index[0]]\n results['rect'] = rect\n rays = get_rays(poses, self.intrinsics, self.H, self.W, -1, rect=rect)\n else:\n rays = get_rays(poses, self.intrinsics, self.H, self.W, self.num_rays, self.opt.patch_size)\n\n results['index'] = index # for ind. 
code\n results['H'] = self.H\n results['W'] = self.W\n results['rays_o'] = rays['rays_o']\n results['rays_d'] = rays['rays_d']\n\n # get a mask for rays inside rect_face\n if self.training:\n xmin, xmax, ymin, ymax = self.face_rect[index[0]]\n face_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['face_mask'] = face_mask\n \n xmin, xmax, ymin, ymax = self.lhalf_rect[index[0]]\n lhalf_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['lhalf_mask'] = lhalf_mask\n\n if self.opt.exp_eye:\n results['eye'] = self.eye_area[index].to(self.device) # [1]\n if self.training:\n results['eye'] += (np.random.rand()-0.5) / 10\n xmin, xmax, ymin, ymax = self.eye_rect[index[0]]\n eye_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['eye_mask'] = eye_mask\n\n else:\n results['eye'] = None\n\n # load bg\n bg_torso_img = self.torso_img[index]\n if self.preload == 0: # on the fly loading\n bg_torso_img = cv2.imread(bg_torso_img[0], cv2.IMREAD_UNCHANGED) # [H, W, 4]\n bg_torso_img = cv2.cvtColor(bg_torso_img, cv2.COLOR_BGRA2RGBA)\n bg_torso_img = bg_torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n bg_torso_img = torch.from_numpy(bg_torso_img).unsqueeze(0)\n bg_torso_img = bg_torso_img[..., :3] * bg_torso_img[..., 3:] + self.bg_img * (1 - bg_torso_img[..., 3:])\n bg_torso_img = bg_torso_img.view(B, -1, 3).to(self.device)\n\n if not self.opt.torso:\n bg_img = bg_torso_img\n else:\n bg_img = self.bg_img.view(1, -1, 3).repeat(B, 1, 1).to(self.device)\n\n if self.training:\n bg_img = torch.gather(bg_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n\n results['bg_color'] = bg_img\n\n if self.opt.torso and self.training:\n bg_torso_img = torch.gather(bg_torso_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n results['bg_torso_color'] = bg_torso_img\n\n images = self.images[index] # [B, H, W, 3/4]\n if self.preload == 0:\n images = cv2.imread(images[0], cv2.IMREAD_UNCHANGED) # [H, W, 3]\n images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)\n images = images.astype(np.float32) / 255 # [H, W, 3]\n images = torch.from_numpy(images).unsqueeze(0)\n images = images.to(self.device)\n\n if self.training:\n C = images.shape[-1]\n images = torch.gather(images.view(B, -1, C), 1, torch.stack(C * [rays['inds']], -1)) # [B, N, 3/4]\n \n results['images'] = images\n\n if self.training:\n bg_coords = torch.gather(self.bg_coords, 1, torch.stack(2 * [rays['inds']], -1)) # [1, N, 2]\n else:\n bg_coords = self.bg_coords # [1, N, 2]\n\n results['bg_coords'] = bg_coords\n\n # results['poses'] = convert_poses(poses) # [B, 6]\n # results['poses_matrix'] = poses # [B, 4, 4]\n results['poses'] = poses # [B, 4, 4]\n \n return results\n\n def dataloader(self):\n\n if self.training:\n # training len(poses) == len(auds)\n size = self.poses.shape[0]\n else:\n # test with novel auds, then use its length\n if self.auds is not None:\n size = self.auds.shape[0]\n # live stream test, use 2 * len(poses), so it naturally mirrors.\n else:\n size = 2 * self.poses.shape[0]\n\n loader = DataLoader(list(range(size)), batch_size=1, collate_fn=self.collate, shuffle=self.training, num_workers=0)\n loader._data = self # an ugly fix... we need poses in trainer.\n\n # do evaluate if has gt images and use self-driven setting\n loader.has_gt = (self.opt.aud == '')\n\n return loader "
},
{
"identifier": "NeRFNetwork",
"path": "nerf_triplane/network.py",
"snippet": "class NeRFNetwork(NeRFRenderer):\n def __init__(self,\n opt,\n # torso net (hard coded for now)\n ):\n super().__init__(opt)\n\n # audio embedding\n self.emb = self.opt.emb\n\n if 'esperanto' in self.opt.asr_model:\n self.audio_in_dim = 44\n elif 'deepspeech' in self.opt.asr_model:\n self.audio_in_dim = 29\n else:\n self.audio_in_dim = 32\n \n if self.emb:\n self.embedding = nn.Embedding(self.audio_in_dim, self.audio_in_dim)\n\n # audio network\n audio_dim = 32\n self.audio_dim = audio_dim\n self.audio_net = AudioNet(self.audio_in_dim, self.audio_dim)\n\n self.att = self.opt.att\n if self.att > 0:\n self.audio_att_net = AudioAttNet(self.audio_dim)\n\n # DYNAMIC PART\n self.num_levels = 12\n self.level_dim = 1\n self.encoder_xy, self.in_dim_xy = get_encoder('hashgrid', input_dim=2, num_levels=self.num_levels, level_dim=self.level_dim, base_resolution=64, log2_hashmap_size=14, desired_resolution=512 * self.bound)\n self.encoder_yz, self.in_dim_yz = get_encoder('hashgrid', input_dim=2, num_levels=self.num_levels, level_dim=self.level_dim, base_resolution=64, log2_hashmap_size=14, desired_resolution=512 * self.bound)\n self.encoder_xz, self.in_dim_xz = get_encoder('hashgrid', input_dim=2, num_levels=self.num_levels, level_dim=self.level_dim, base_resolution=64, log2_hashmap_size=14, desired_resolution=512 * self.bound)\n\n self.in_dim = self.in_dim_xy + self.in_dim_yz + self.in_dim_xz\n\n ## sigma network\n self.num_layers = 3\n self.hidden_dim = 64\n self.geo_feat_dim = 64\n self.eye_att_net = MLP(self.in_dim, 1, 16, 2)\n self.eye_dim = 1 if self.exp_eye else 0\n self.sigma_net = MLP(self.in_dim + self.audio_dim + self.eye_dim, 1 + self.geo_feat_dim, self.hidden_dim, self.num_layers)\n ## color network\n self.num_layers_color = 2\n self.hidden_dim_color = 64\n self.encoder_dir, self.in_dim_dir = get_encoder('spherical_harmonics')\n self.color_net = MLP(self.in_dim_dir + self.geo_feat_dim + self.individual_dim, 3, self.hidden_dim_color, self.num_layers_color)\n # 处理音频的\n self.unc_net = MLP(self.in_dim, 1, 32, 2)\n\n self.aud_ch_att_net = MLP(self.in_dim, self.audio_dim, 64, 2)\n\n self.testing = False\n\n if self.torso:\n # torso deform network\n self.register_parameter('anchor_points', \n nn.Parameter(torch.tensor([[0.01, 0.01, 0.1, 1], [-0.1, -0.1, 0.1, 1], [0.1, -0.1, 0.1, 1]])))\n self.torso_deform_encoder, self.torso_deform_in_dim = get_encoder('frequency', input_dim=2, multires=8)\n # self.torso_deform_encoder, self.torso_deform_in_dim = get_encoder('tiledgrid', input_dim=2, num_levels=16, level_dim=1, base_resolution=16, log2_hashmap_size=16, desired_resolution=512)\n self.anchor_encoder, self.anchor_in_dim = get_encoder('frequency', input_dim=6, multires=3)\n self.torso_deform_net = MLP(self.torso_deform_in_dim + self.anchor_in_dim + self.individual_dim_torso, 2, 32, 3)\n\n # torso color network\n self.torso_encoder, self.torso_in_dim = get_encoder('tiledgrid', input_dim=2, num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=16, desired_resolution=2048)\n self.torso_net = MLP(self.torso_in_dim + self.torso_deform_in_dim + self.anchor_in_dim + self.individual_dim_torso, 4, 32, 3)\n\n\n def forward_torso(self, x, poses, c=None):\n # x: [N, 2] in [-1, 1]\n # head poses: [1, 4, 4]\n # c: [1, ind_dim], individual code\n\n # test: shrink x\n x = x * self.opt.torso_shrink\n # 对pose进行了调整\n # deformation-based\n wrapped_anchor = self.anchor_points[None, ...] 
@ poses.permute(0, 2, 1).inverse()\n wrapped_anchor = (wrapped_anchor[:, :, :2] / wrapped_anchor[:, :, 3, None] / wrapped_anchor[:, :, 2, None]).view(1, -1)\n # print(wrapped_anchor)\n # enc_pose = self.pose_encoder(poses)\n enc_anchor = self.anchor_encoder(wrapped_anchor)\n enc_x = self.torso_deform_encoder(x)\n\n if c is not None:\n h = torch.cat([enc_x, enc_anchor.repeat(x.shape[0], 1), c.repeat(x.shape[0], 1)], dim=-1)\n else:\n h = torch.cat([enc_x, enc_anchor.repeat(x.shape[0], 1)], dim=-1)\n\n dx = self.torso_deform_net(h)\n \n x = (x + dx).clamp(-1, 1)\n\n x = self.torso_encoder(x, bound=1)\n\n # h = torch.cat([x, h, enc_a.repeat(x.shape[0], 1)], dim=-1)\n h = torch.cat([x, h], dim=-1)\n\n h = self.torso_net(h)\n\n alpha = torch.sigmoid(h[..., :1])*(1 + 2*0.001) - 0.001\n color = torch.sigmoid(h[..., 1:])*(1 + 2*0.001) - 0.001\n\n return alpha, color, dx\n\n\n @staticmethod\n @torch.jit.script\n def split_xyz(x):\n xy, yz, xz = x[:, :-1], x[:, 1:], torch.cat([x[:,:1], x[:,-1:]], dim=-1)\n return xy, yz, xz\n\n\n def encode_x(self, xyz, bound):\n # x: [N, 3], in [-bound, bound]\n N, M = xyz.shape\n xy, yz, xz = self.split_xyz(xyz)\n feat_xy = self.encoder_xy(xy, bound=bound)\n feat_yz = self.encoder_yz(yz, bound=bound)\n feat_xz = self.encoder_xz(xz, bound=bound)\n \n return torch.cat([feat_xy, feat_yz, feat_xz], dim=-1)\n \n\n def encode_audio(self, a):\n # a: [1, 29, 16] or [8, 29, 16], audio features from deepspeech\n # if emb, a should be: [1, 16] or [8, 16]\n\n # fix audio traininig\n if a is None: return None\n\n if self.emb:\n a = self.embedding(a).transpose(-1, -2).contiguous() # [1/8, 29, 16]\n\n enc_a = self.audio_net(a) # [1/8, 64]\n\n if self.att > 0:\n enc_a = self.audio_att_net(enc_a.unsqueeze(0)) # [1, 64]\n \n return enc_a\n\n \n def predict_uncertainty(self, unc_inp):\n if self.testing or not self.opt.unc_loss:\n unc = torch.zeros_like(unc_inp)\n else:\n unc = self.unc_net(unc_inp.detach())\n\n return unc\n\n\n def forward(self, x, d, enc_a, c, e=None):\n # x: [N, 3], in [-bound, bound]\n # d: [N, 3], nomalized in [-1, 1]\n # enc_a: [1, aud_dim]\n # c: [1, ind_dim], individual code\n # e: [1, 1], eye feature\n enc_x = self.encode_x(x, bound=self.bound)\n\n sigma_result = self.density(x, enc_a, e, enc_x)\n sigma = sigma_result['sigma']\n geo_feat = sigma_result['geo_feat']\n aud_ch_att = sigma_result['ambient_aud']\n eye_att = sigma_result['ambient_eye']\n\n # color\n enc_d = self.encoder_dir(d)\n\n if c is not None:\n h = torch.cat([enc_d, geo_feat, c.repeat(x.shape[0], 1)], dim=-1)\n else:\n h = torch.cat([enc_d, geo_feat], dim=-1)\n \n h_color = self.color_net(h)\n color = torch.sigmoid(h_color)*(1 + 2*0.001) - 0.001\n \n uncertainty = self.predict_uncertainty(enc_x)\n uncertainty = torch.log(1 + torch.exp(uncertainty))\n\n return sigma, color, aud_ch_att, eye_att, uncertainty[..., None]\n\n\n def density(self, x, enc_a, e=None, enc_x=None):\n # x: [N, 3], in [-bound, bound]\n if enc_x is None:\n enc_x = self.encode_x(x, bound=self.bound)\n\n enc_a = enc_a.repeat(enc_x.shape[0], 1)\n aud_ch_att = self.aud_ch_att_net(enc_x)\n enc_w = enc_a * aud_ch_att\n\n if e is not None:\n # e = self.encoder_eye(e)\n eye_att = torch.sigmoid(self.eye_att_net(enc_x))\n e = e * eye_att\n # e = e.repeat(enc_x.shape[0], 1)\n h = torch.cat([enc_x, enc_w, e], dim=-1)\n else:\n h = torch.cat([enc_x, enc_w], dim=-1)\n\n h = self.sigma_net(h)\n\n sigma = torch.exp(h[..., 0])\n geo_feat = h[..., 1:]\n\n return {\n 'sigma': sigma,\n 'geo_feat': geo_feat,\n 'ambient_aud' : 
aud_ch_att.norm(dim=-1, keepdim=True),\n 'ambient_eye' : eye_att,\n }\n\n\n # optimizer utils\n def get_params(self, lr, lr_net, wd=0):\n\n # ONLY train torso\n if self.torso:\n params = [\n {'params': self.torso_encoder.parameters(), 'lr': lr},\n {'params': self.torso_deform_encoder.parameters(), 'lr': lr, 'weight_decay': wd},\n {'params': self.torso_net.parameters(), 'lr': lr_net, 'weight_decay': wd},\n {'params': self.torso_deform_net.parameters(), 'lr': lr_net, 'weight_decay': wd},\n {'params': self.anchor_points, 'lr': lr_net, 'weight_decay': wd}\n ]\n\n if self.individual_dim_torso > 0:\n params.append({'params': self.individual_codes_torso, 'lr': lr_net, 'weight_decay': wd})\n\n return params\n\n params = [\n {'params': self.audio_net.parameters(), 'lr': lr_net, 'weight_decay': wd}, \n\n {'params': self.encoder_xy.parameters(), 'lr': lr},\n {'params': self.encoder_yz.parameters(), 'lr': lr},\n {'params': self.encoder_xz.parameters(), 'lr': lr},\n # {'params': self.encoder_xyz.parameters(), 'lr': lr},\n\n {'params': self.sigma_net.parameters(), 'lr': lr_net, 'weight_decay': wd},\n {'params': self.color_net.parameters(), 'lr': lr_net, 'weight_decay': wd}, \n ]\n if self.att > 0:\n params.append({'params': self.audio_att_net.parameters(), 'lr': lr_net * 5, 'weight_decay': 0.0001})\n if self.emb:\n params.append({'params': self.embedding.parameters(), 'lr': lr})\n if self.individual_dim > 0:\n params.append({'params': self.individual_codes, 'lr': lr_net, 'weight_decay': wd})\n if self.train_camera:\n params.append({'params': self.camera_dT, 'lr': 1e-5, 'weight_decay': 0})\n params.append({'params': self.camera_dR, 'lr': 1e-5, 'weight_decay': 0})\n\n params.append({'params': self.aud_ch_att_net.parameters(), 'lr': lr_net, 'weight_decay': wd})\n params.append({'params': self.unc_net.parameters(), 'lr': lr_net, 'weight_decay': wd})\n params.append({'params': self.eye_att_net.parameters(), 'lr': lr_net, 'weight_decay': wd})\n\n return params"
}
] | import torch
import argparse
from nerf_triplane.provider import NeRFDataset
from nerf_triplane.utils import *
from nerf_triplane.network import NeRFNetwork
from nerf_triplane.gui import NeRFGUI | 10,710 | parser.add_argument('--lr', type=float, default=1e-2, help="initial learning rate")
parser.add_argument('--lr_net', type=float, default=1e-3, help="initial learning rate")
parser.add_argument('--ckpt', type=str, default='latest')
parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
### loss set
parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps")
parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss")
parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss")
parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss")
parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss")
### network backbone options
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--bg_img', type=str, default='', help="background image")
parser.add_argument('--fbg', action='store_true', help="frame-wise bg")
parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes")
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye")
parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence")
parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform")
### dataset options
parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.")
# (the default value is for the fox dataset)
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.")
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")
parser.add_argument('--init_lips', action='store_true', help="init lips region")
parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
    parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in an exponential decay way...")
parser.add_argument('--torso', action='store_true', help="fix head and train torso")
parser.add_argument('--head_ckpt', type=str, default='', help="head model")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=450, help="GUI width")
parser.add_argument('--H', type=int, default=450, help="GUI height")
parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
### else
parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)")
parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")
parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")
# asr
parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
parser.add_argument('--asr_play', action='store_true', help="play out the audio")
parser.add_argument('--asr_model', type=str, default='deepspeech')
# parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto')
# parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
parser.add_argument('--asr_save_feats', action='store_true')
# audio FPS
parser.add_argument('--fps', type=int, default=50)
# sliding window left-middle-right length (unit: 20ms)
parser.add_argument('-l', type=int, default=10)
parser.add_argument('-m', type=int, default=50)
parser.add_argument('-r', type=int, default=10)
opt = parser.parse_args()
if opt.O:
opt.fp16 = True
opt.exp_eye = True
if opt.test and False:
opt.smooth_path = True
opt.smooth_eye = True
opt.smooth_lips = True
opt.cuda_ray = True
# assert opt.cuda_ray, "Only support CUDA ray mode."
if opt.patch_size > 1:
# assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss."
        assert opt.num_rays % (opt.patch_size ** 2) == 0, "num_rays should be divisible by patch_size ** 2."
# if opt.finetune_lips:
# # do not update density grid in finetune stage
# opt.update_extra_interval = 1e9
print(opt)
seed_everything(opt.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
# torch.autograd.set_detect_anomaly(True)
# Disable TF32 features, which cause low numerical accuracy on RTX 30xx GPUs.
try:
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
except AttributeError as e:
    print('Info: this PyTorch version does not support TF32 settings.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('path', type=str)
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye")
parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)")
parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)")
parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use")
parser.add_argument('--workspace', type=str, default='workspace')
parser.add_argument('--seed', type=int, default=0)
### training options
parser.add_argument('--iters', type=int, default=200000, help="training iters")
parser.add_argument('--lr', type=float, default=1e-2, help="initial learning rate")
parser.add_argument('--lr_net', type=float, default=1e-3, help="initial learning rate")
parser.add_argument('--ckpt', type=str, default='latest')
parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
### loss set
parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps")
parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss")
parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss")
parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss")
parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss")
### network backbone options
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--bg_img', type=str, default='', help="background image")
parser.add_argument('--fbg', action='store_true', help="frame-wise bg")
parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes")
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye")
parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence")
parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform")
### dataset options
parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.")
# (the default value is for the fox dataset)
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.")
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")
parser.add_argument('--init_lips', action='store_true', help="init lips region")
parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
    parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in an exponential decay way...")
parser.add_argument('--torso', action='store_true', help="fix head and train torso")
parser.add_argument('--head_ckpt', type=str, default='', help="head model")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=450, help="GUI width")
parser.add_argument('--H', type=int, default=450, help="GUI height")
parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
### else
parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)")
parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")
parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")
# asr
parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
parser.add_argument('--asr_play', action='store_true', help="play out the audio")
parser.add_argument('--asr_model', type=str, default='deepspeech')
# parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto')
# parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
parser.add_argument('--asr_save_feats', action='store_true')
# audio FPS
parser.add_argument('--fps', type=int, default=50)
# sliding window left-middle-right length (unit: 20ms)
parser.add_argument('-l', type=int, default=10)
parser.add_argument('-m', type=int, default=50)
parser.add_argument('-r', type=int, default=10)
opt = parser.parse_args()
if opt.O:
opt.fp16 = True
opt.exp_eye = True
if opt.test and False:
opt.smooth_path = True
opt.smooth_eye = True
opt.smooth_lips = True
opt.cuda_ray = True
# assert opt.cuda_ray, "Only support CUDA ray mode."
if opt.patch_size > 1:
# assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss."
        assert opt.num_rays % (opt.patch_size ** 2) == 0, "num_rays should be divisible by patch_size ** 2."
# if opt.finetune_lips:
# # do not update density grid in finetune stage
# opt.update_extra_interval = 1e9
print(opt)
seed_everything(opt.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
| model = NeRFNetwork(opt) | 1 | 2023-12-19 01:32:46+00:00 | 12k |
MingtaoGuo/AnimateAnyone_unofficial | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, 
on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 9,767 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps", "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps", "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema: | self.model_ema = LitEma(self.model) | 8 | 2023-12-16 03:31:33+00:00 | 12k |
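
The cropped and full code for this row both stop at the EMA setup (next_line is self.model_ema = LitEma(self.model)); the remainder of DDPM registers the noise schedule and trains on noised samples. For reference, here is a self-contained sketch of the forward-diffusion step that the make_beta_schedule and extract_into_tensor helpers in this row's context support, assuming the default linear schedule and eps-parameterization; it is not the file's literal continuation.

import torch

T = 1000
# linear schedule, as in make_beta_schedule(schedule="linear", ...)
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, T, dtype=torch.float64) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def q_sample(x0: torch.Tensor, t: torch.Tensor, noise: torch.Tensor) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    shape = (x0.shape[0],) + (1,) * (x0.dim() - 1)
    a_bar = alphas_cumprod.to(x0.dtype)[t].reshape(shape)
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

x0 = torch.randn(2, 3, 64, 64)   # e.g. images, or latents from a first-stage autoencoder
t = torch.randint(0, T, (2,))
eps = torch.randn_like(x0)
x_t = q_sample(x0, t, eps)       # with parameterization="eps", the UNet learns to predict `eps`

In the eps setting the simple objective reduces to an MSE between predicted and true noise, reduced with a mean_flat-style mean over the non-batch dimensions.
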
yasserben/CLOUDS | clouds/clouds.py | [
{
"identifier": "SetCriterion",
"path": "clouds/modeling/criterion.py",
"snippet": "class SetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n matcher,\n weight_dict,\n eos_coef,\n losses,\n num_points,\n oversample_ratio,\n importance_sample_ratio,\n ):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer(\"empty_weight\", empty_weight)\n\n # pointwise mask loss parameters\n self.num_points = num_points\n self.oversample_ratio = oversample_ratio\n self.importance_sample_ratio = importance_sample_ratio\n\n def loss_labels(self, outputs, targets, indices, num_masks):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"].float()\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat(\n [t[\"labels\"][J] for t, (_, J) in zip(targets, indices)]\n )\n target_classes = torch.full(\n src_logits.shape[:2],\n self.num_classes,\n dtype=torch.int64,\n device=src_logits.device,\n )\n target_classes[idx] = target_classes_o\n\n loss_ce = F.cross_entropy(\n src_logits.transpose(1, 2), target_classes, self.empty_weight\n )\n losses = {\"loss_ce\": loss_ce}\n return losses\n\n def loss_masks(self, outputs, targets, indices, num_masks):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n tgt_idx = self._get_tgt_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n masks = [t[\"masks\"] for t in targets]\n # TODO use valid to mask invalid areas due to padding in loss\n target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()\n target_masks = target_masks.to(src_masks)\n target_masks = target_masks[tgt_idx]\n\n # No need to upsample predictions as we are using normalized coordinates :)\n # N x 1 x H x W\n src_masks = src_masks[:, None]\n target_masks = target_masks[:, None]\n\n with torch.no_grad():\n # sample point_coords\n point_coords = get_uncertain_point_coords_with_randomness(\n src_masks,\n lambda logits: calculate_uncertainty(logits),\n self.num_points,\n self.oversample_ratio,\n self.importance_sample_ratio,\n )\n # get gt labels\n point_labels = point_sample(\n target_masks,\n point_coords,\n align_corners=False,\n ).squeeze(1)\n\n point_logits = point_sample(\n src_masks,\n point_coords,\n align_corners=False,\n ).squeeze(1)\n\n losses = 
{\n \"loss_mask\": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),\n \"loss_dice\": dice_loss_jit(point_logits, point_labels, num_masks),\n }\n\n del src_masks\n del target_masks\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat(\n [torch.full_like(src, i) for i, (src, _) in enumerate(indices)]\n )\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat(\n [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]\n )\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_masks):\n loss_map = {\n \"labels\": self.loss_labels,\n \"masks\": self.loss_masks,\n }\n assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n return loss_map[loss](outputs, targets, indices, num_masks)\n\n def forward(self, outputs, targets):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_masks = sum(len(t[\"labels\"]) for t in targets)\n num_masks = torch.as_tensor(\n [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_masks)\n num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(\n loss, aux_outputs, targets, indices, num_masks\n )\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n def __repr__(self):\n head = \"Criterion \" + self.__class__.__name__\n body = [\n \"matcher: {}\".format(self.matcher.__repr__(_repr_indent=8)),\n \"losses: {}\".format(self.losses),\n \"weight_dict: {}\".format(self.weight_dict),\n \"num_classes: {}\".format(self.num_classes),\n \"eos_coef: {}\".format(self.eos_coef),\n \"num_points: {}\".format(self.num_points),\n \"oversample_ratio: {}\".format(self.oversample_ratio),\n \"importance_sample_ratio: {}\".format(self.importance_sample_ratio),\n ]\n _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)"
},
{
"identifier": "HungarianMatcher",
"path": "clouds/modeling/matcher.py",
"snippet": "class HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_mask: float = 1,\n cost_dice: float = 1,\n num_points: int = 0,\n ):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert (\n cost_class != 0 or cost_mask != 0 or cost_dice != 0\n ), \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(\n -1\n ) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"]\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask)\n\n out_mask = out_mask[:, None]\n tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1),\n align_corners=False,\n ).squeeze(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1),\n align_corners=False,\n ).squeeze(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (\n torch.as_tensor(i, dtype=torch.int64),\n torch.as_tensor(j, dtype=torch.int64),\n )\n for i, j in indices\n ]\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim 
[num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)"
},
{
"identifier": "SAM",
"path": "clouds/sam.py",
"snippet": "class SAM(nn.Module):\n def __init__(self,\n *,\n mobile: bool,\n size_threshold: int,\n erosion: bool,\n erosion_size: int,\n num_points: int,\n selection_mode: str,\n rm_intersection: bool,\n refinement: bool,\n ):\n\n super().__init__()\n self.mobile = mobile\n self.sam_refinement = refinement\n self.sam_size_threshold = size_threshold\n self.sam_erosion = erosion\n self.sam_erosion_size = erosion_size\n self.sam_num_points = num_points\n self.sam_selection_mode = selection_mode\n self.sam_rm_intersection = rm_intersection\n\n if self.mobile:\n from mobile_sam import sam_model_registry, SamPredictor\n from mobile_sam.utils.transforms import ResizeLongestSide\n\n self.sam_preprocessor = ResizeLongestSide(1024)\n self.sam = sam_model_registry[\"vit_t\"](\n checkpoint=\"./weights/mobile_sam.pt\"\n )\n else:\n from segment_anything import sam_model_registry, SamPredictor\n from segment_anything.utils.transforms import ResizeLongestSide\n\n self.sam_preprocessor = ResizeLongestSide(1024)\n self.sam = sam_model_registry[\"vit_h\"](\n checkpoint=\"./weights/sam_vit_h_4b8939.pth\"\n )\n\n self.sam_predictor = SamPredictor(self.sam)\n self.sam_mask_generator = SamAutomaticMaskGenerator(self.sam)\n\n def forward(self, x):\n \"\"\"\n Define the forward pass for your inference model.\n\n Args:\n x: Input data or image tensor.\n\n Returns:\n output: Model's output tensor after the forward pass.\n \"\"\"\n\n return x\n\n def set_torch_image(self, image, size):\n with torch.no_grad():\n self.sam_predictor.set_torch_image(image, size)\n\n def predict_torch(self, point_coords, point_labels, multimask_output, mask_input=None):\n # self.sam.eval()\n with torch.no_grad():\n return self.sam_predictor.predict_torch(\n point_coords=point_coords,\n point_labels=point_labels,\n multimask_output=multimask_output,\n mask_input=mask_input,\n )\n\n def apply_image(self, image):\n with torch.no_grad():\n return self.sam_preprocessor.apply_image(image)\n\n def generate_mask(self, image):\n masks = self.sam_mask_generator.generate(image)\n return masks\n\n def apply_coords(self, coords, size):\n with torch.no_grad():\n return self.sam_preprocessor.apply_coords(coords,size)"
},
{
"identifier": "MaskPooling",
"path": "clouds/modeling/transformer_decoder/clouds_transformer_decoder.py",
"snippet": "class MaskPooling(nn.Module):\n def __init__(\n self,\n ):\n super().__init__()\n\n def forward(self, x, mask):\n \"\"\"\n Args:\n x: [B, C, H, W]\n mask: [B, Q, H, W]\n \"\"\"\n if not x.shape[-2:] == mask.shape[-2:]:\n # reshape mask to x\n mask = F.interpolate(\n mask, size=x.shape[-2:], mode=\"bilinear\", align_corners=False\n )\n with torch.no_grad():\n mask = mask.detach()\n mask = (mask > 0).to(mask.dtype)\n denorm = mask.sum(dim=(-1, -2), keepdim=True) + 1e-8\n\n mask_pooled_x = torch.einsum(\n \"bchw,bqhw->bqc\",\n x,\n mask / denorm,\n )\n return mask_pooled_x"
},
{
"identifier": "get_classification_logits",
"path": "clouds/modeling/transformer_decoder/clouds_transformer_decoder.py",
"snippet": "def get_classification_logits(x, text_classifier, logit_scale, num_templates=None):\n # x in shape of [B, *, C]\n # text_classifier in shape of [num_classes, C]\n # logit_scale is a learnable scalar https://github.com/mlfoundations/open_clip/blob/main/src/open_clip/model.py#L201\n # return: [B, *, num_classes]\n x = F.normalize(x, dim=-1)\n logit_scale = torch.clamp(logit_scale.exp(), max=100)\n pred_logits = logit_scale * x @ text_classifier.T # B, *, N + 1\n # max ensembel as in OpenSeg/ODISE\n final_pred_logits = []\n cur_idx = 0\n for num_t in num_templates:\n final_pred_logits.append(\n pred_logits[:, :, cur_idx : cur_idx + num_t].max(-1).values\n )\n cur_idx += num_t\n final_pred_logits.append(pred_logits[:, :, -1]) # the last classifier is for void\n final_pred_logits = torch.stack(final_pred_logits, dim=-1)\n return final_pred_logits"
}
] | from typing import Tuple
from copy import deepcopy
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from torch.nn.parallel import DistributedDataParallel
from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher
from my_utils import *
from scipy.ndimage import label, center_of_mass
from scipy.ndimage import binary_erosion
from scipy.ndimage import label, sum as ndi_sum
from .sam import SAM
from .modeling.transformer_decoder.clouds_transformer_decoder import (
MaskPooling,
get_classification_logits,
)
from torch.nn.modules.dropout import _DropoutNd
from timm.models.layers import DropPath
import numpy as np
import matplotlib.pyplot as plt
import torch
import os
import copy
import cv2 | 7,308 | seg_maps_target = self.predict_inference(
outputs_target,
features_clean["clip_vis_dense"],
text_classifier,
num_templates,
images_norm_clean,
batched_inputs_target,
)
targets_target = process_segmentation_maps(seg_maps_target)
if self.sam_enabled:
separate_dict = separate_shapes_list(
targets_target, size_threshold=self.sam_size_threshold
)
coordinate_dict = get_fixed_points(
separate_dict,
apply_erosion=self.sam_erosion,
num_points=self.sam_num_points,
erosion_size=self.sam_erosion_size,
selection_mode=self.sam_selection_mode,
)
last_targets_target = []
for i, dico in enumerate(batched_inputs_target):
image_i = dico["image"]
image_perm = image_i.permute(1, 2, 0).cpu().numpy()
image_perm = self.sam.apply_image(image_perm)
self.sam.set_torch_image(
torch.tensor(image_perm.transpose(2, 0, 1))
.unsqueeze(0)
.to(self.device),
(768, 768),
)
points_coords, count_per_key = dict_to_tensor(
coordinate_dict[i]
)
points_coords = self.sam.apply_coords(
points_coords.cpu().numpy(), (768, 768)
)
if points_coords.shape[0]:
(masks, logits, masks_input,) = self.sam.predict_torch(
point_coords=torch.tensor(points_coords).to(
self.device
),
point_labels=create_ones_tensor(points_coords).to(
self.device
),
multimask_output=True,
)
if self.sam_refinement:
masks_input = select_best_masks(masks_input, logits)
masks, logits, _, = self.sam.predict_torch(
point_coords=torch.tensor(points_coords).to(
self.device
),
point_labels=create_ones_tensor(
points_coords
).to(self.device),
mask_input=masks_input.unsqueeze(1),
multimask_output=True,
)
masks = select_best_masks(masks, logits)
if self.sam_rm_intersection:
masks = remove_intersecting_pixels(masks)
reconstructed_dict = reconstruct_dict(
masks, count_per_key
)
new_targets_target = transform_masks(reconstructed_dict)
last_targets_target.append(new_targets_target)
viz_targets_target = union_of_masks(reconstructed_dict)
visualize_semantic_map_maxed(viz_targets_target)
save_semantic_map_maxed(viz_targets_target, after=True)
else:
last_targets_target.append(targets_target[i])
targets_target = last_targets_target
for i, index in enumerate(order_target):
targets[index] = targets_target[i]
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
self.local_iter += 1
return losses
else:
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
if self.geometric_ensemble:
# We ensemble the pred logits of in-vocab and out-vocab
clip_feature = features["clip_vis_dense"]
mask_for_pooling = F.interpolate(
mask_pred_results,
size=clip_feature.shape[-2:],
mode="bilinear",
align_corners=False,
)
if "convnext" in self.backbone.model_name.lower():
pooled_clip_feature = self.mask_pooling(
clip_feature, mask_for_pooling
)
pooled_clip_feature = self.backbone.visual_prediction_forward(
pooled_clip_feature
)
elif "rn" in self.backbone.model_name.lower():
pooled_clip_feature = self.backbone.visual_prediction_forward(
clip_feature, mask_for_pooling
)
else:
raise NotImplementedError
| """
# ---------------------------------------------------------------
# Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
# Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py
https://github.com/bytedance/fc-clip/blob/main/fcclip/fcclip.py
# ---------------------------------------------------------------
"""
def is_element_in_string(my_list, my_string):
for element in my_list:
if element in my_string:
return True
return False
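# Hedged usage sketch (added for clarity, not part of the original file):
# `is_element_in_string` is an "any element is a substring of the string" test;
# the backbone names below are hypothetical and used only for illustration.
def _is_element_in_string_example():
    assert is_element_in_string(["convnext", "rn"], "convnext_large_d_320")
    assert not is_element_in_string(["convnext", "vit"], "resnet50")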
def show_anns(anns, val=0.35):
if len(anns) == 0:
return
sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
ax = plt.gca()
ax.set_autoscale_on(False)
for ann in sorted_anns:
m = ann["segmentation"]
img = np.ones((m.shape[0], m.shape[1], 3))
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:, :, i] = color_mask[i]
ax.imshow(np.dstack((img, m * val)))
def write_masks_to_png(
masks,
image,
filename,
path="segmented",
val=0.35,
) -> None:
plt.figure(figsize=(30, 30))
plt.imshow(image)
show_anns(masks, val)
plt.axis("off")
# plt.show()
# filename = f"masks.png"
plt.savefig(os.path.join(path, filename))
return
#
# pred = processed_results[0]["sem_seg"].unsqueeze(dim=0)
# pred = torch.argmax(pred, dim=1)
# pred_1 = torch.squeeze(pred)
# pred_1 = np.asarray(pred_1.cpu().data, dtype=np.uint8)
# pred_1_map = colorize_mask(pred_1, None)
VILD_PROMPT = [
"a photo of a {}.",
"This is a photo of a {}",
"There is a {} in the scene",
"There is the {} in the scene",
"a photo of a {} in the scene",
"a photo of a small {}.",
"a photo of a medium {}.",
"a photo of a large {}.",
"This is a photo of a small {}.",
"This is a photo of a medium {}.",
"This is a photo of a large {}.",
"There is a small {} in the scene.",
"There is a medium {} in the scene.",
"There is a large {} in the scene.",
]
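# Hedged illustration (added for clarity, not part of the original file): each
# class name is expanded into len(VILD_PROMPT) prompts, whose text embeddings are
# later averaged into a single classifier vector (see get_text_classifier below).
# The class name used here is hypothetical.
def _vild_prompt_example(class_name="traffic light"):
    # e.g. "a photo of a traffic light.", "There is a small traffic light in the scene.", ...
    return [template.format(class_name) for template in VILD_PROMPT]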
def _params_equal(ema_model, model):
for ema_param, param in zip(ema_model.named_parameters(), model.named_parameters()):
if not torch.equal(ema_param[1].data, param[1].data):
# print("Difference in", ema_param[0])
return False
return True
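# Hedged sketch (added for clarity, not part of the original file): `_params_equal`
# only backs the commented-out sanity checks in `forward`; it returns True when every
# named parameter of the EMA model equals its counterpart in the student model, e.g.:
def _params_equal_demo():
    student = nn.Linear(2, 2)
    ema = copy.deepcopy(student)
    return _params_equal(ema, student)  # True immediately after the deep copy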
@META_ARCH_REGISTRY.register()
class CLOUDS(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
train_metadata,
test_metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# inference
semantic_on: bool,
panoptic_on: bool,
instance_on: bool,
test_topk_per_image: int,
# CLOUDS
geometric_ensemble_alpha: float,
geometric_ensemble_beta: float,
ensemble_on_valid_mask: bool,
geometric_ensemble: bool,
geometric_ensemble_ema: bool,
sam_enabled: bool,
sam_mobile: bool,
sam_minibatch: bool,
sam_size_threshold: int,
sam_erosion: bool,
sam_erosion_size: int,
sam_num_points: int,
sam_selection_mode: str,
sam_rm_intersection: bool,
sam_refinement: bool,
alpha_ema: float,
overwriting: bool,
iteration_update: int,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
specific integer. We can use this to override such requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
For high-resolution datasets like Mapillary, resizing predictions before
inference will cause OOM error.
pixel_mean, pixel_std: list or tuple with #channels elements, representing
the per-channel mean and std to be used to normalize the input image
semantic_on: bool, whether to output semantic segmentation prediction
instance_on: bool, whether to output instance segmentation prediction
panoptic_on: bool, whether to output panoptic segmentation prediction
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.sam_minibatch = sam_minibatch
self.overwriting = overwriting
if self.sam_minibatch:
self.sem_seg_head_ema = deepcopy(self.sem_seg_head)
self.local_iter = 0
self.criterion = criterion
self.num_queries = num_queries
self.iteration_update = iteration_update
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.train_metadata = train_metadata
self.test_metadata = test_metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer(
"pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False
)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
# additional args
self.semantic_on = semantic_on
self.instance_on = instance_on
self.panoptic_on = panoptic_on
self.test_topk_per_image = test_topk_per_image
if not self.semantic_on:
assert self.sem_seg_postprocess_before_inference
# CLOUDS args
self.mask_pooling = MaskPooling()
self.geometric_ensemble_alpha = geometric_ensemble_alpha
self.geometric_ensemble_beta = geometric_ensemble_beta
self.ensemble_on_valid_mask = ensemble_on_valid_mask
self.train_text_classifier = None
self.test_text_classifier = None
self.void_embedding = nn.Embedding(1, backbone.dim_latent) # use this for void
self.geometric_ensemble = geometric_ensemble
self.geometric_ensemble_ema = geometric_ensemble_ema
(
_,
self.train_num_templates,
self.train_class_names,
) = self.prepare_class_names_from_metadata(train_metadata, train_metadata)
(
self.category_overlapping_mask,
self.test_num_templates,
self.test_class_names,
) = self.prepare_class_names_from_metadata(test_metadata, train_metadata)
self.sam_enabled = sam_enabled
if self.sam_enabled:
self.sam = SAM(
mobile=sam_mobile,
size_threshold=sam_size_threshold,
erosion=sam_erosion,
erosion_size=sam_erosion_size,
num_points=sam_num_points,
selection_mode=sam_selection_mode,
rm_intersection=sam_rm_intersection,
refinement=sam_refinement,
)
self.sam_size_threshold = sam_size_threshold
self.sam_erosion = sam_erosion
self.sam_erosion_size = sam_erosion_size
self.sam_num_points = sam_num_points
self.sam_selection_mode = sam_selection_mode
self.sam_rm_intersection = sam_rm_intersection
self.sam_refinement = sam_refinement
self.alpha_ema = alpha_ema
def get_module(self, module):
"""Get `nn.ModuleDict` to fit the `MMDistributedDataParallel` interface.
Args:
module (MMDistributedDataParallel | nn.ModuleDict): The input
module that needs processing.
Returns:
nn.ModuleDict: The ModuleDict of multiple networks.
"""
if isinstance(module, DistributedDataParallel):
return module.module
return module
def get_ema_model(self):
return self.get_module(self.sem_seg_head_ema)
def get_model(self):
return self.get_module(self.sem_seg_head)
def init_ema_weights(self):
for param in self.get_ema_model().parameters():
param.detach_()
mp = list(self.get_model().parameters())
mcp = list(self.get_ema_model().parameters())
for i in range(0, len(mp)):
if not mcp[i].data.shape: # scalar tensor
mcp[i].data = mp[i].data.clone()
else:
mcp[i].data[:] = mp[i].data[:].clone()
def update_ema_weights(self, iter):
# alpha_teacher = min(1 - 1 / (iter + 1), self.alpha_ema)
alpha_teacher = self.alpha_ema
for ema_param, param in zip(
self.get_ema_model().parameters(), self.get_model().parameters()
):
if not param.data.shape: # scalar tensor
ema_param.data = (
alpha_teacher * ema_param.data + (1 - alpha_teacher) * param.data
)
else:
ema_param.data[:] = (
alpha_teacher * ema_param[:].data[:]
+ (1 - alpha_teacher) * param[:].data[:]
)
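    # Hedged sketch (added for clarity, not part of the original class): the update
    # above is a plain exponential moving average; for a single scalar it reduces to
    # the helper below, e.g. CLOUDS._ema_scalar(0.9, 1.0, 0.0) == 0.9.
    @staticmethod
    def _ema_scalar(alpha_teacher, ema_value, student_value):
        return alpha_teacher * ema_value + (1 - alpha_teacher) * student_value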
def prepare_class_names_from_metadata(self, metadata, train_metadata):
def split_labels(x):
res = []
for x_ in x:
x_ = x_.replace(", ", ",")
x_ = x_.split(",") # there can be multiple synonyms for single class
res.append(x_)
return res
# get text classifier
try:
class_names = split_labels(
metadata.stuff_classes
) # it includes both thing and stuff
train_class_names = split_labels(train_metadata.stuff_classes)
except:
# this could be for insseg, where only thing_classes are available
class_names = split_labels(metadata.thing_classes)
train_class_names = split_labels(train_metadata.thing_classes)
train_class_names = {l for label in train_class_names for l in label}
category_overlapping_list = []
for test_class_names in class_names:
is_overlapping = not set(train_class_names).isdisjoint(
set(test_class_names)
)
category_overlapping_list.append(is_overlapping)
category_overlapping_mask = torch.tensor(
category_overlapping_list, dtype=torch.long
)
def fill_all_templates_ensemble(x_=""):
res = []
for x in x_:
for template in VILD_PROMPT:
res.append(template.format(x))
return res, len(res) // len(VILD_PROMPT)
num_templates = []
templated_class_names = []
for x in class_names:
templated_classes, templated_classes_num = fill_all_templates_ensemble(x)
templated_class_names += templated_classes
num_templates.append(
templated_classes_num
) # how many templates for current classes
class_names = templated_class_names
# print("text for classification:", class_names)
return category_overlapping_mask, num_templates, class_names
def set_metadata(self, metadata):
self.test_metadata = metadata
(
self.category_overlapping_mask,
self.test_num_templates,
self.test_class_names,
) = self.prepare_class_names_from_metadata(metadata, self.train_metadata)
self.test_text_classifier = None
return
def get_text_classifier(self):
if self.training:
if self.train_text_classifier is None:
text_classifier = []
# this is needed to avoid OOM, which may happen when the number of classes is large
bs = 128
for idx in range(0, len(self.train_class_names), bs):
text_classifier.append(
self.backbone.get_text_classifier(
self.train_class_names[idx : idx + bs], self.device
).detach()
)
text_classifier = torch.cat(text_classifier, dim=0)
# average across templates and normalize.
text_classifier /= text_classifier.norm(dim=-1, keepdim=True)
text_classifier = text_classifier.reshape(
text_classifier.shape[0] // len(VILD_PROMPT),
len(VILD_PROMPT),
text_classifier.shape[-1],
).mean(1)
text_classifier /= text_classifier.norm(dim=-1, keepdim=True)
self.train_text_classifier = text_classifier
return self.train_text_classifier, self.train_num_templates
else:
if self.test_text_classifier is None:
text_classifier = []
# this is needed to avoid OOM, which may happen when the number of classes is large
bs = 128
for idx in range(0, len(self.test_class_names), bs):
text_classifier.append(
self.backbone.get_text_classifier(
self.test_class_names[idx : idx + bs], self.device
).detach()
)
text_classifier = torch.cat(text_classifier, dim=0)
# average across templates and normalize.
text_classifier /= text_classifier.norm(dim=-1, keepdim=True)
text_classifier = text_classifier.reshape(
text_classifier.shape[0] // len(VILD_PROMPT),
len(VILD_PROMPT),
text_classifier.shape[-1],
).mean(1)
text_classifier /= text_classifier.norm(dim=-1, keepdim=True)
self.test_text_classifier = text_classifier
return self.test_text_classifier, self.test_num_templates
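    # Hedged illustration (added for clarity, not part of the original class): the
    # per-class text classifier built above is the re-normalized mean over the
    # VILD_PROMPT templates, equivalent to this small helper.
    @staticmethod
    def _average_templates_example(text_features):
        # text_features: [num_classes * len(VILD_PROMPT), dim], already L2-normalized
        feats = text_features.reshape(-1, len(VILD_PROMPT), text_features.shape[-1]).mean(1)
        return feats / feats.norm(dim=-1, keepdim=True)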
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion
matcher = HungarianMatcher(
cost_class=class_weight,
cost_mask=mask_weight,
cost_dice=dice_weight,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
)
weight_dict = {
"loss_ce": class_weight,
"loss_mask": mask_weight,
"loss_dice": dice_weight,
}
if deep_supervision:
dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "masks"]
criterion = SetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"train_metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"test_metadata": MetadataCatalog.get(cfg.DATASETS.TEST[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": (
cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
or cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON
),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# inference
"semantic_on": cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON,
"instance_on": cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON,
"panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
"test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
"geometric_ensemble_alpha": cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA,
"geometric_ensemble_beta": cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA,
"ensemble_on_valid_mask": cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK,
"geometric_ensemble": cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE,
"geometric_ensemble_ema": cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA,
"sam_enabled": cfg.MODEL.CLOUDS.SAM.ENABLED,
"sam_mobile": cfg.MODEL.CLOUDS.SAM.MOBILE,
"sam_minibatch": cfg.MODEL.CLOUDS.SAM.MINIBATCH,
"sam_size_threshold": cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD,
"sam_erosion": cfg.MODEL.CLOUDS.SAM.EROSION,
"sam_erosion_size": cfg.MODEL.CLOUDS.SAM.EROSION_SIZE,
"sam_num_points": cfg.MODEL.CLOUDS.SAM.NUM_POINTS,
"sam_selection_mode": cfg.MODEL.CLOUDS.SAM.SELECTION_MODE,
"sam_rm_intersection": cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION,
"sam_refinement": cfg.MODEL.CLOUDS.SAM.REFINEMENT,
"alpha_ema": cfg.MODEL.CLOUDS.SAM.ALPHA_EMA,
"overwriting": cfg.MODEL.CLOUDS.OVERWRITING,
"iteration_update": cfg.MODEL.CLOUDS.ITERATION_UPDATE,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
per-pixel segmentation predicted by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
A tuple that represents the panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
if self.training:
if self.sam_minibatch:
# Init/update ema model
if self.local_iter == 0:
self.init_ema_weights()
# assert _params_equal(self.get_ema_model(), self.get_model())
if not self.local_iter % self.iteration_update:
self.update_ema_weights(self.local_iter)
# assert not _params_equal(self.get_ema_model(), self.get_model())
# assert self.get_ema_model().training
# We select the source images and augmented version of the generated ones
images = [
x["image_aug"].to(self.device)
if "image_aug" in x
else x["image"].to(self.device)
for x in batched_inputs
]
images_norm_list = [(x - self.pixel_mean) / self.pixel_std for x in images]
images_norm = ImageList.from_tensors(images_norm_list, self.size_divisibility)
# We select the clean version of the generated ones
images_clean = [
x["image"].to(self.device) for x in batched_inputs if "image_aug" in x
]
if images_clean:
images_norm_list_clean = [
(x - self.pixel_mean) / self.pixel_std for x in images_clean
]
images_norm_clean = ImageList.from_tensors(
images_norm_list_clean, self.size_divisibility
)
with torch.no_grad():
features_clean = self.backbone(images_norm_clean.tensor)
features = self.backbone(images_norm.tensor)
text_classifier, num_templates = self.get_text_classifier()
# Append void class weight
text_classifier = torch.cat(
[text_classifier, F.normalize(self.void_embedding.weight, dim=-1)], dim=0
)
features["text_classifier"] = text_classifier
features["num_templates"] = num_templates
if images_clean:
features_clean["text_classifier"] = text_classifier
features_clean["num_templates"] = num_templates
outputs = self.sem_seg_head(features)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
targets = self.prepare_targets(gt_instances, images_norm)
if images_clean:
(
batched_inputs_target,
order_target,
) = separate_dicts_by_filename(batched_inputs)
for m in self.get_ema_model().modules():
if isinstance(m, _DropoutNd):
m.training = False
if isinstance(m, DropPath):
m.training = False
with torch.no_grad():
outputs_target = self.get_ema_model()(features_clean)
seg_maps_target = self.predict_inference(
outputs_target,
features_clean["clip_vis_dense"],
text_classifier,
num_templates,
images_norm_clean,
batched_inputs_target,
)
targets_target = process_segmentation_maps(seg_maps_target)
if self.sam_enabled:
separate_dict = separate_shapes_list(
targets_target, size_threshold=self.sam_size_threshold
)
coordinate_dict = get_fixed_points(
separate_dict,
apply_erosion=self.sam_erosion,
num_points=self.sam_num_points,
erosion_size=self.sam_erosion_size,
selection_mode=self.sam_selection_mode,
)
last_targets_target = []
for i, dico in enumerate(batched_inputs_target):
image_i = dico["image"]
image_perm = image_i.permute(1, 2, 0).cpu().numpy()
image_perm = self.sam.apply_image(image_perm)
self.sam.set_torch_image(
torch.tensor(image_perm.transpose(2, 0, 1))
.unsqueeze(0)
.to(self.device),
(768, 768),
)
points_coords, count_per_key = dict_to_tensor(
coordinate_dict[i]
)
points_coords = self.sam.apply_coords(
points_coords.cpu().numpy(), (768, 768)
)
if points_coords.shape[0]:
(masks, logits, masks_input,) = self.sam.predict_torch(
point_coords=torch.tensor(points_coords).to(
self.device
),
point_labels=create_ones_tensor(points_coords).to(
self.device
),
multimask_output=True,
)
if self.sam_refinement:
masks_input = select_best_masks(masks_input, logits)
masks, logits, _, = self.sam.predict_torch(
point_coords=torch.tensor(points_coords).to(
self.device
),
point_labels=create_ones_tensor(
points_coords
).to(self.device),
mask_input=masks_input.unsqueeze(1),
multimask_output=True,
)
masks = select_best_masks(masks, logits)
if self.sam_rm_intersection:
masks = remove_intersecting_pixels(masks)
reconstructed_dict = reconstruct_dict(
masks, count_per_key
)
new_targets_target = transform_masks(reconstructed_dict)
last_targets_target.append(new_targets_target)
viz_targets_target = union_of_masks(reconstructed_dict)
visualize_semantic_map_maxed(viz_targets_target)
save_semantic_map_maxed(viz_targets_target, after=True)
else:
last_targets_target.append(targets_target[i])
targets_target = last_targets_target
for i, index in enumerate(order_target):
targets[index] = targets_target[i]
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
self.local_iter += 1
return losses
else:
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
if self.geometric_ensemble:
# We ensemble the pred logits of in-vocab and out-vocab
clip_feature = features["clip_vis_dense"]
mask_for_pooling = F.interpolate(
mask_pred_results,
size=clip_feature.shape[-2:],
mode="bilinear",
align_corners=False,
)
if "convnext" in self.backbone.model_name.lower():
pooled_clip_feature = self.mask_pooling(
clip_feature, mask_for_pooling
)
pooled_clip_feature = self.backbone.visual_prediction_forward(
pooled_clip_feature
)
elif "rn" in self.backbone.model_name.lower():
pooled_clip_feature = self.backbone.visual_prediction_forward(
clip_feature, mask_for_pooling
)
else:
raise NotImplementedError
| out_vocab_cls_results = get_classification_logits( | 4 | 2023-12-15 15:40:58+00:00 | 12k |
modelscope/scepter | scepter/modules/solver/train_val_solver.py | [
{
"identifier": "BaseSolver",
"path": "scepter/modules/solver/base_solver.py",
"snippet": "class BaseSolver(object, metaclass=ABCMeta):\n \"\"\" Base Solver.\n To initialize the solver.\n We have to initialize the data, model, optimizer and schedule.\n To process the common processing we also have to initialize the hooks.\n How to support Pytorch_lightning framework? Take a simple task as an examples.\n \"\"\"\n para_dict = {\n 'TRAIN_PRECISION': {\n 'value': 32,\n 'description': 'The precision for train process.'\n },\n 'FILE_SYSTEM': {},\n 'ACCU_STEP': {\n 'value':\n 1,\n 'description':\n 'When use ddp, the grad accumulate steps for each process.'\n },\n 'RESUME_FROM': {\n 'value': '',\n 'description': 'Resume from some state of training!'\n },\n 'MAX_EPOCHS': {\n 'value': 10,\n 'description': 'Max epochs for training.'\n },\n 'NUM_FOLDS': {\n 'value': 1,\n 'description': 'Num folds for training.'\n },\n 'WORK_DIR': {\n 'value': '',\n 'description': 'Save dir of the training log or model.'\n },\n 'LOG_FILE': {\n 'value': '',\n 'description': 'Save log path.'\n },\n 'EVAL_INTERVAL': {\n 'value': 1,\n 'description': 'Eval the model interval.'\n },\n 'EXTRA_KEYS': {\n 'value': [],\n 'description': 'The extra keys for metric.'\n },\n 'TRAIN_DATA': {\n 'description': 'Train data config.'\n },\n 'EVAL_DATA': {\n 'description': 'Eval data config.'\n },\n 'TEST_DATA': {\n 'description': 'Test data config.'\n },\n 'TRAIN_HOOKS': [],\n 'EVAL_HOOKS': [],\n 'TEST_HOOKS': [],\n 'MODEL': {},\n 'OPTIMIZER': {},\n 'LR_SCHEDULER': {},\n 'METRICS': []\n }\n\n def __init__(self, cfg, logger=None):\n # initialize some hyperparameters\n self.file_system = cfg.get('FILE_SYSTEM', None)\n self.work_dir: str = cfg.WORK_DIR\n self.pl_dir = self.work_dir\n self.log_file = osp_path(self.work_dir, cfg.LOG_FILE)\n self.optimizer, self.lr_scheduler = None, None\n self.cfg = cfg\n self.logger = logger\n self.resume_from: str = cfg.RESUME_FROM\n self.max_epochs: int = cfg.MAX_EPOCHS\n self.use_pl = we.use_pl\n self.train_precision = self.cfg.get('TRAIN_PRECISION', 32)\n self._mode_set = set()\n self._mode = 'train'\n self.probe_ins = {}\n self.clear_probe_ins = {}\n self._num_folds: int = 1\n if not self.use_pl:\n world_size = we.world_size\n if world_size > 1:\n self._num_folds: int = cfg.NUM_FOLDS\n if cfg.have('MODE'):\n self._mode_set.add(cfg.MODE)\n self._mode = cfg.MODE\n if we.is_distributed:\n self.accu_step = cfg.get('ACCU_STEP', 1)\n\n self.do_step = True\n self.hooks_dict = {'train': [], 'eval': [], 'test': []}\n self.datas = {}\n # Other initialized parameters\n self._epoch: int = 0\n # epoch_max_iter, iter, total_iter, iter_outputs, epoch_outputs\n # values is different according to self._mode\n self._epoch_max_iter: defaultdict = defaultdict(int)\n self._iter: defaultdict = defaultdict(int)\n self._total_iter: defaultdict = defaultdict(int)\n self._iter_outputs = defaultdict(dict)\n self._agg_iter_outputs = defaultdict(dict)\n self._epoch_outputs = defaultdict(dict)\n self._probe_data = defaultdict(dict)\n self._dist_data = defaultdict(dict)\n self._model_parameters = 0\n self._model_flops = 0\n self._loss = None # loss tensor\n self._local_rank = we.rank\n if isinstance(self.file_system, list):\n for file_sys in self.file_system:\n FS.init_fs_client(file_sys, logger=self.logger)\n elif self.file_system is not None:\n FS.init_fs_client(self.file_system, logger=self.logger)\n self._prefix = FS.get_fs_client(self.work_dir).get_prefix()\n if not FS.exists(self.work_dir):\n FS.make_dir(self.work_dir)\n self.logger.info(\n f\"Parse work dir {self.work_dir}'s prefix is 
{self._prefix}\")\n\n def set_up_pre(self):\n # initialize Enviranment\n if self._local_rank == 0:\n if self.log_file.startswith('file://'):\n save_folder = get_relative_folder(self.log_file, -1)\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n elif not self.log_file.startswith(self._prefix):\n self.log_file = os.path.join(self._prefix, self.log_file)\n init_logger(self.logger,\n log_file=self.log_file,\n dist_launcher='pytorch')\n self.construct_hook()\n\n def __setattr__(self, key, value):\n if isinstance(value, BaseModel):\n self.probe_ins[key] = value.probe_data\n self.clear_probe_ins[key] = value.clear_probe\n super().__setattr__(key, value)\n\n def set_up(self):\n self.construct_data()\n self.construct_model()\n self.construct_metrics()\n if not self.use_pl:\n self.model_to_device()\n self.init_opti()\n if self.use_pl:\n self.local_work_dir, _ = FS.map_to_local(self.work_dir)\n os.makedirs(self.local_work_dir, exist_ok=True)\n # resume\n resume_local_file = None\n if self.resume_from is not None and FS.exists(self.resume_from):\n with FS.get_from(self.resume_from,\n wait_finish=True) as local_file:\n self.logger.info(\n f'Loading checkpoint from {self.resume_from}')\n resume_local_file = local_file\n self.pl_ins = PyLightningWrapper(self)\n self.pl_trainer = pl.Trainer(\n default_root_dir=self.local_work_dir,\n max_epochs=self.max_epochs,\n precision=self.train_precision,\n accelerator='auto',\n devices='auto',\n check_val_every_n_epoch=self.eval_interval,\n resume_from_checkpoint=resume_local_file)\n self.pl_dir = os.path.join(\n self.work_dir,\n '/'.join(self.pl_trainer.log_dir.split('/')[-2:]))\n\n def construct_data(self):\n def one_device_init():\n # initialize data\n assert self.cfg.have('TRAIN_DATA') or self.cfg.have(\n 'EVAL_DATA') or self.cfg.have('TEST_DATA')\n if self.cfg.have('TRAIN_DATA') and ('train' in self._mode_set\n or len(self._mode_set) < 1):\n self.cfg.TRAIN_DATA.NUM_FOLDS = self.num_folds\n train_data = DATASETS.build(self.cfg.TRAIN_DATA,\n logger=self.logger)\n self.datas['train'] = train_data\n self._mode_set.add('train')\n if not self.use_pl:\n self._epoch_max_iter['train'] = len(\n train_data.dataloader) // self.num_folds + 1\n else:\n self._epoch_max_iter['train'] = -1\n if self.cfg.have('EVAL_DATA'):\n eval_data = DATASETS.build(self.cfg.EVAL_DATA,\n logger=self.logger)\n self.datas['eval'] = eval_data\n self._mode_set.add('eval')\n if not self.use_pl:\n self._epoch_max_iter['eval'] = len(eval_data.dataloader)\n else:\n self._epoch_max_iter['eval'] = -1\n if self.cfg.have('TEST_DATA'):\n test_data = DATASETS.build(self.cfg.TEST_DATA,\n logger=self.logger)\n self.datas['test'] = test_data\n if not self.use_pl:\n self._epoch_max_iter['test'] = len(test_data.dataloader)\n else:\n self._epoch_max_iter['test'] = -1\n self._mode_set.add('test')\n\n one_device_init()\n\n def construct_hook(self):\n # initialize data\n assert self.use_pl or self.cfg.have('TRAIN_HOOKS') or self.cfg.have(\n 'EVAL_HOOKS') or self.cfg.have('TEST_HOOKS')\n if self.cfg.have('TRAIN_HOOKS') and ('train' in self._mode_set\n or len(self._mode_set) < 1):\n self.hooks_dict['train'] = self._load_hook(self.cfg.TRAIN_HOOKS)\n if self.cfg.have('EVAL_HOOKS'):\n assert self.cfg.have('EVAL_HOOKS')\n self.hooks_dict['eval'] = self._load_hook(self.cfg.EVAL_HOOKS)\n if self.cfg.have('TEST_HOOKS') and ('test' in self._mode_set\n or 'eval' not in self._mode_set):\n self.hooks_dict['test'] = self._load_hook(self.cfg.TEST_HOOKS)\n\n def construct_model(self):\n # initialize Model\n assert 
self.cfg.have('MODEL')\n self.model = MODELS.build(self.cfg.MODEL, logger=self.logger)\n\n def construct_metrics(self):\n # Initial metric\n self.metrics = []\n self.eval_interval = self.cfg.get('EVAL_INTERVAL', 1)\n\n def model_to_device(self, tg_model_ins=None):\n # Initialize distributed model\n if tg_model_ins is None:\n tg_model = self.model\n else:\n tg_model = tg_model_ins\n if we.is_distributed and we.sync_bn is True:\n self.logger.info('Convert BatchNorm to Synchronized BatchNorm...')\n tg_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(tg_model)\n tg_model = tg_model.to(we.device_id)\n if we.is_distributed:\n tg_model = DistributedDataParallel(\n tg_model,\n device_ids=[torch.cuda.current_device()],\n output_device=torch.cuda.current_device(),\n broadcast_buffers=True)\n self.logger.info('Transfer to ddp ...')\n if tg_model_ins is None:\n self.model = tg_model\n else:\n return tg_model\n\n def init_opti(self):\n if self.cfg.have('OPTIMIZER'):\n self.optimizer = OPTIMIZERS.build(\n self.cfg.OPTIMIZER,\n logger=self.logger,\n parameters=self.model.parameters())\n if self.cfg.have('LR_SCHEDULER') and self.optimizer is not None:\n self.lr_scheduler = LR_SCHEDULERS.build(self.cfg.LR_SCHEDULER,\n logger=self.logger,\n optimizer=self.optimizer)\n\n def solve(self, epoch=None, every_epoch=False):\n if not self.use_pl:\n if epoch is not None:\n self.epoch = epoch\n self.before_solve()\n if self.epoch >= self.max_epochs:\n self.logger.info(\n f'Nothing to do because current epoch {self.epoch} greater max epoches {self.epoch}'\n )\n while self.epoch < self.max_epochs:\n self.solve_train()\n self.solve_eval()\n self.solve_test()\n if 'train' not in self._mode_set and not every_epoch:\n break\n self.after_solve()\n else:\n train_dataloader = None\n if 'train' in self.datas:\n train_dataloader = self.datas['train'].dataloader\n val_dataloader = None\n if 'eval' in self.datas:\n val_dataloader = self.datas['eval'].dataloader\n self.pl_trainer.fit(self.pl_ins,\n train_dataloaders=train_dataloader,\n val_dataloaders=val_dataloader)\n if 'test' in self.datas:\n self.pl_trainer.test(self.pl_ins,\n dataloaders=self.datas['test'].dataloader)\n\n def solve_train(self):\n current_mode = 'train'\n if current_mode in self._mode_set:\n self.logger.info(\n f'Begin to solve {current_mode} at Epoch [{self.epoch}/{self.max_epochs}]...'\n )\n self.before_epoch(self.hooks_dict[current_mode])\n self.run_train()\n self.after_epoch(self.hooks_dict[current_mode])\n\n def solve_eval(self):\n current_mode = 'eval'\n if current_mode in self._mode_set and self.epoch % self.eval_interval == 0:\n self.logger.info(\n f'Begin to solve {current_mode} at Epoch [{self.epoch}/{self.max_epochs}]...'\n )\n self.before_epoch(self.hooks_dict[current_mode])\n self.run_eval()\n self.after_epoch(self.hooks_dict[current_mode])\n\n def solve_test(self):\n current_mode = 'test'\n if current_mode in self._mode_set:\n self.logger.info(\n f'Begin to solve {current_mode} at Epoch [{self.epoch}/{self.max_epochs}]...'\n )\n self.before_epoch(self.hooks_dict[current_mode])\n self.run_test()\n self.after_epoch(self.hooks_dict[current_mode])\n\n def before_solve(self):\n for k, hooks in self.hooks_dict.items():\n [t.before_solve(self) for t in hooks]\n\n def after_solve(self):\n for k, hooks in self.hooks_dict.items():\n [t.after_solve(self) for t in hooks]\n\n def run_train(self):\n self.train_mode()\n self.before_all_iter(self.hooks_dict[self._mode])\n for batch_idx, batch_data in enumerate(\n self.datas[self._mode].dataloader):\n 
self.before_iter(self.hooks_dict[self._mode])\n results = self.run_step_train(transfer_data_to_cuda(batch_data),\n batch_idx,\n step=self.total_iter,\n rank=we.rank)\n self._iter_outputs[self._mode] = self._reduce_scalar(results)\n self.after_iter(self.hooks_dict[self._mode])\n self.after_all_iter(self.hooks_dict[self._mode])\n\n def run_step_train(self, batch_data, batch_idx=0, step=None, rank=None):\n results = self.model(**batch_data)\n return results\n\n @torch.no_grad()\n def run_eval(self):\n self.eval_mode()\n self.before_all_iter(self.hooks_dict[self._mode])\n for batch_idx, batch_data in enumerate(\n self.datas[self._mode].dataloader):\n self.before_iter(self.hooks_dict[self._mode])\n results = self.run_step_eval(transfer_data_to_cuda(batch_data),\n batch_idx,\n step=self.total_iter,\n rank=we.rank)\n self._iter_outputs[self._mode] = self._reduce_scalar(results)\n self.after_iter(self.hooks_dict[self._mode])\n self.after_all_iter(self.hooks_dict[self._mode])\n\n def run_step_eval(self, batch_data, batch_idx=0, step=None, rank=None):\n results = self.model(**batch_data)\n return results\n\n @torch.no_grad()\n def run_test(self):\n self.test_mode()\n self.before_all_iter(self.hooks_dict[self._mode])\n for batch_idx, batch_data in enumerate(\n self.datas[self._mode].dataloader):\n self.before_iter(self.hooks_dict[self._mode])\n results = self.run_step_test(transfer_data_to_cuda(batch_data),\n batch_idx,\n step=self.total_iter,\n rank=we.rank)\n self._iter_outputs[self._mode] = self._reduce_scalar(results)\n self.after_iter(self.hooks_dict[self._mode])\n self.after_all_iter(self.hooks_dict[self._mode])\n\n def run_step_test(self, batch_data, batch_idx=0, step=None, rank=None):\n results = self.model(**batch_data)\n return results\n\n @torch.no_grad()\n def register_flops(self, data, keys=[]):\n from fvcore.nn import FlopCountAnalysis\n if len(keys) < 1:\n keys = list(data.keys())\n for key in data:\n if isinstance(data[key], torch.Tensor):\n batch_one_data = data[key][0, ...]\n batch_one_data = torch.unsqueeze(batch_one_data, dim=0)\n data[key] = batch_one_data\n elif isinstance(data[key], list):\n data[key] = data[key][0]\n\n tensor = [data[k] for k in keys]\n flops = FlopCountAnalysis(self.model, tuple(tensor))\n self._model_flops = flops.total()\n\n def before_epoch(self, hooks):\n [t.before_epoch(self) for t in hooks]\n\n def before_all_iter(self, hooks):\n [t.before_all_iter(self) for t in hooks]\n\n def before_iter(self, hooks):\n if not self.use_pl and self.is_train_mode:\n self._epoch = self._total_iter[self._mode] // self._epoch_max_iter[\n self._mode] + 1\n if self._iter[self._mode] % self._epoch_max_iter[self._mode] == 0:\n self._iter[self._mode] = 0\n [t.before_iter(self) for t in hooks]\n\n def after_iter(self, hooks):\n [t.after_iter(self) for t in hooks]\n if not self.use_pl:\n self._total_iter[self._mode] += 1\n self._iter[self._mode] += 1\n self.clear_probe()\n\n def after_all_iter(self, hooks):\n [t.after_all_iter(self) for t in hooks]\n\n def after_epoch(self, hooks):\n [t.after_epoch(self) for t in hooks]\n self._iter.clear()\n self._iter_outputs.clear()\n self._epoch_outputs.clear()\n if self.use_pl:\n FS.put_dir_from_local_dir(self.local_work_dir, self.work_dir)\n\n def collect_log_vars(self) -> OrderedDict:\n ret = OrderedDict()\n if self.is_train_mode and self.optimizer is not None:\n for idx, pg in enumerate(self.optimizer.param_groups):\n ret[f'pg{idx}_lr'] = pg['lr']\n return ret\n\n def load_checkpoint(self, checkpoint: dict):\n \"\"\"\n Load checkpoint 
function\n :param checkpoint: all tensors are on cpu, you need to transfer to gpu by hand\n :return:\n \"\"\"\n pass\n\n def save_checkpoint(self) -> dict:\n \"\"\"\n Save checkpoint function, you need to transfer all tensors to cpu by hand\n :return:\n \"\"\"\n pass\n\n @property\n def num_folds(self) -> int:\n return self._num_folds\n\n @property\n def epoch(self) -> int:\n return self._epoch\n\n @epoch.setter\n def epoch(self, new_epoch):\n self._epoch = new_epoch\n\n @property\n def iter(self) -> int:\n return self._iter[self._mode]\n\n @property\n def probe_data(self):\n return self._probe_data[self._mode]\n\n @property\n def total_iter(self) -> int:\n return self._total_iter[self._mode]\n\n @property\n def epoch_max_iter(self) -> int:\n return self._epoch_max_iter[self._mode]\n\n @property\n def mode(self) -> str:\n return self._mode\n\n @property\n def iter_outputs(self) -> dict:\n return self._iter_outputs[self._mode]\n\n @property\n def agg_iter_outputs(self) -> dict:\n return self._agg_iter_outputs\n\n @agg_iter_outputs.setter\n def agg_iter_outputs(self, new_outputs):\n assert type(new_outputs) is dict\n self._agg_iter_outputs[self._mode] = new_outputs\n\n @property\n def epoch_outputs(self) -> dict:\n return self._epoch_outputs\n\n @property\n def is_train_mode(self):\n return self._mode == 'train'\n\n @property\n def is_eval_mode(self):\n return self._mode == 'eval'\n\n @property\n def is_test_mode(self):\n return self._mode == 'test'\n\n def train_mode(self):\n self.model.train()\n self._mode = 'train'\n\n def eval_mode(self):\n self.model.eval()\n self._mode = 'eval'\n\n def test_mode(self):\n self.model.eval()\n self._mode = 'test'\n\n def register_probe(self, probe_data: dict):\n probe_da, dist_da = register_data(probe_data,\n key_prefix=__class__.__name__)\n self._probe_data[self.mode].update(probe_da)\n for key in dist_da:\n if key not in self._dist_data[self.mode]:\n self._dist_data[self.mode][key] = dist_da[key]\n else:\n for k, v in dist_da[key].items():\n if k in self._dist_data[self.mode][key]:\n self._dist_data[self.mode][key][k] += v\n else:\n self._dist_data[self.mode][key][k] = v\n\n @property\n def probe_data(self): # noqa\n gather_probe_data = gather_data(self._probe_data[self.mode])\n _dist_data_list = gather_data([self._dist_data[self.mode] or {}])\n if not we.rank == 0:\n self._probe_data[self.mode] = {}\n self._dist_data[self.mode] = {}\n # Iterate recurse the sub class's probe data.\n for k, func in self.probe_ins.items():\n for kk, vv in func().items():\n self._probe_data[self.mode][f'{k}/{kk}'] = vv\n if gather_probe_data is not None:\n # Before processing, just merge the data.\n self._probe_data[self.mode] = merge_gathered_probe(\n gather_probe_data)\n if _dist_data_list is not None and len(_dist_data_list) > 0:\n reduce_dist_data = {}\n for one_data in _dist_data_list:\n for k, v in one_data.items():\n if k in reduce_dist_data:\n for kk, vv in v.items():\n if kk in reduce_dist_data[k]:\n reduce_dist_data[k][kk] += vv\n else:\n reduce_dist_data[k][kk] = vv\n else:\n reduce_dist_data[k] = v\n self._dist_data[self.mode] = reduce_dist_data\n self._probe_data[\n self.mode][f'{__class__.__name__}_distribute'] = ProbeData(\n self._dist_data[self.mode])\n norm_dist_data = {}\n for key, value in self._dist_data[self.mode].items():\n total = 0\n for k, v in value.items():\n total += v\n norm_v = {}\n for k, v in value.items():\n norm_v[k] = v / total\n norm_dist_data[key] = norm_v\n self._probe_data[\n self.mode][f'{__class__.__name__}_norm_distribute'] = 
ProbeData(\n norm_dist_data)\n ret_data = copy.deepcopy(self._probe_data[self.mode])\n self._probe_data[self.mode] = {}\n return ret_data\n\n def clear_probe(self):\n self._probe_data[self.mode].clear()\n # Iterate recurse the sub class's probe data.\n for k, func in self.clear_probe_ins.items():\n func()\n\n def _load_hook(self, hooks):\n ret_hooks = []\n if hooks is not None and len(hooks) > 0:\n for hook_cfg in hooks:\n if self.use_pl:\n if 'backward' in hook_cfg.NAME.lower(\n ) or 'lrhook' in hook_cfg.NAME.lower(\n ) or 'samplerhook' in hook_cfg.NAME.lower():\n self.logger.info(\n f'Hook {hook_cfg.NAME} is not useful when use PytorchLightning!'\n )\n continue\n ret_hooks.append(HOOKS.build(hook_cfg, logger=self.logger))\n ret_hooks.sort(key=lambda a: a.priority)\n return ret_hooks\n\n def get_optim_parameters(self):\n return self.model.parameters()\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}'\n\n def _reduce_scalar(self, data_dict: dict):\n \"\"\" Only reduce all scalar tensor values if distributed.\n Any way, loss tensor will be specially processed just in case.\n\n Args:\n data_dict: Dict result returned by model.\n\n Returns:\n A new data dict whose tensor scalar values is all-reduced.\n\n \"\"\"\n if 'loss' in data_dict:\n self.loss = data_dict['loss']\n data_dict['loss'] = self.loss.data.clone()\n\n if isinstance(data_dict, OrderedDict):\n keys = data_dict.keys()\n else:\n keys = sorted(list(data_dict.keys()))\n\n ret = OrderedDict()\n # print([(key, type(data_dict[key])) for key in keys], f\"{dist.get_rank()}\", f\"{self.iter}\")\n for key in keys:\n value = data_dict[key]\n if isinstance(value, torch.Tensor) and value.ndim == 0:\n if dist.is_available() and dist.is_initialized():\n value = value.data.clone()\n dist.all_reduce(value.div_(dist.get_world_size()))\n ret[key] = value\n else:\n ret[key] = value\n\n return ret\n\n def _build_metrics(self, cfgs, logger=None):\n if isinstance(cfgs, (list, tuple)):\n for cfg in cfgs:\n self._build_metrics(cfg, logger=logger)\n elif isinstance(cfgs, Config):\n fn = METRICS.build(cfgs, logger)\n keys = cfgs.KEYS\n self.metrics.append({'fn': fn, 'keys': keys})\n self._collect_keys.update(keys)\n\n def print_memory_status(self):\n if torch.cuda.is_available():\n nvi_info = os.popen('nvidia-smi').read()\n gpu_mem = nvi_info.split('\\n')[9].split('|')[2].split(\n '/')[0].strip()\n else:\n gpu_mem = ''\n return gpu_mem\n\n def print_model_params_status(self, model=None, logger=None):\n \"\"\"Print the status and parameters of the model\"\"\"\n if model is None:\n model = self.model\n if logger is None:\n logger = self.logger\n train_param_dict = {}\n forzen_param_dict = {}\n all_param_numel = 0\n for key, val in model.named_parameters():\n if val.requires_grad:\n sub_key = '.'.join(key.split('.', 1)[-1].split('.', 2)[:2])\n if sub_key in train_param_dict:\n train_param_dict[sub_key] += val.numel()\n else:\n train_param_dict[sub_key] = val.numel()\n else:\n sub_key = '.'.join(key.split('.', 1)[-1].split('.', 1)[:1])\n if sub_key in forzen_param_dict:\n forzen_param_dict[sub_key] += val.numel()\n else:\n forzen_param_dict[sub_key] = val.numel()\n all_param_numel += val.numel()\n train_param_numel = sum(train_param_dict.values())\n forzen_param_numel = sum(forzen_param_dict.values())\n logger.info(\n f'Load trainable params {train_param_numel} / {all_param_numel} = '\n f'{train_param_numel / all_param_numel:.2%}, '\n f'train part: {train_param_dict}.')\n logger.info(\n f'Load forzen params {forzen_param_numel} / 
{all_param_numel} = '\n f'{forzen_param_numel / all_param_numel:.2%}, '\n f'forzen part: {forzen_param_dict}.')\n\n @staticmethod\n def get_config_template():\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n :return:\n '''\n return dict_to_yaml('solvername',\n __class__.__name__,\n BaseSolver.para_dict,\n set_name=True)"
},
{
"identifier": "SOLVERS",
"path": "scepter/modules/solver/registry.py",
"snippet": "SOLVERS = Registry('SOLVERS', build_func=build_solver, allow_types=('class', ))"
},
{
"identifier": "dict_to_yaml",
"path": "scepter/modules/utils/config.py",
"snippet": "def dict_to_yaml(module_name, name, json_config, set_name=False):\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n convert std dict to yaml\n :param module_name:\n :param json_config:\n :return:\n '''\n def convert_yaml_style(level=1,\n name='ENV',\n description='ENV PARA',\n default='',\n type_name='',\n is_sys=False):\n new_line = ''\n new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\\n'.format(\n '\\t' * (level - 1), name.upper(), description, type_name,\n f'\\'{default}\\'' if isinstance(default, str) else default)\n if is_sys:\n if name == '-':\n new_line += '{}{}\\n'.format('\\t' * (level - 1), name.upper())\n else:\n new_line += '{}{}:\\n'.format('\\t' * (level - 1), name.upper())\n else:\n # if isinstance(default, str):\n # default = f'\\'{default}\\''\n if default is None:\n new_line += '{}# {}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n else:\n new_line += '{}{}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n return new_line\n\n def parse_dict(json_config,\n level_num,\n parent_key,\n set_name=False,\n name='',\n parent_type='dict'):\n yaml_str = ''\n # print(level_num, json_config)\n if isinstance(json_config, dict):\n if 'value' in json_config:\n value = json_config['value']\n if isinstance(value, dict):\n assert len(value) < 1\n value = None\n description = json_config.get('description', '')\n yaml_str += convert_yaml_style(level=level_num - 1,\n name=parent_key,\n description=description,\n default=value,\n type_name=type(value).__name__)\n return True, yaml_str\n else:\n if len(json_config) < 1:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default='',\n type_name='')\n level_num += 1\n for k, v in json_config.items():\n if k == 'description':\n continue\n if isinstance(v, dict):\n is_final, new_yaml_str = parse_dict(v,\n level_num,\n k,\n parent_type='dict')\n if not is_final and parent_type == 'dict':\n description = v.get('description', '')\n yaml_str += convert_yaml_style(\n level=level_num - 1,\n name=k,\n description=description,\n default='',\n type_name='',\n is_sys=True)\n if not is_final and parent_type == 'list':\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=k,\n type_name='')\n yaml_str += new_yaml_str\n elif isinstance(v, list):\n base_yaml_str = convert_yaml_style(level=level_num - 1,\n name=k,\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += base_yaml_str\n for tup in v:\n is_final, new_yaml_str = parse_dict(\n tup, level_num, '-', parent_type='list')\n if not is_final:\n yaml_str += convert_yaml_style(level=level_num,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += new_yaml_str\n else:\n raise KeyError(\n f'json config {json_config} must be a dict of list'\n )\n\n elif isinstance(json_config, list):\n level_num += 1\n for tup in json_config:\n is_final, new_yaml_str = parse_dict(tup, level_num, '-')\n if not is_final:\n\n yaml_str += convert_yaml_style(level=level_num - 1,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n if set_name:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n raise KeyError(f'json config {json_config} must be a dict')\n return False, yaml_str\n\n if isinstance(json_config, dict):\n first_dict, sec_dict, third_dict = {}, {}, {}\n for key, 
value in json_config.items():\n if isinstance(value, dict) and len(value) > 0:\n first_dict[key] = value\n elif isinstance(value, dict) and len(value) == 0:\n sec_dict[key] = value\n elif isinstance(value, list):\n third_dict[key] = value\n else:\n raise f'Config {json_config} is illegal'\n json_config = {}\n json_config.update(first_dict)\n json_config.update(sec_dict)\n json_config.update(third_dict)\n\n yaml_str = f'[{module_name}] module yaml examples:\\n'\n level_num = 1\n base_yaml_str = convert_yaml_style(level=level_num,\n name=module_name,\n description='',\n default='',\n type_name='',\n is_sys=True)\n level_num += 1\n\n is_final, new_yaml_str = parse_dict(json_config,\n level_num,\n module_name,\n set_name=isinstance(json_config, list)\n and set_name,\n name=name)\n if not is_final:\n yaml_str += base_yaml_str\n if set_name and not isinstance(json_config, list):\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n yaml_str += new_yaml_str[1:]\n\n return yaml_str"
},
{
"identifier": "transfer_data_to_cpu",
"path": "scepter/modules/utils/data.py",
"snippet": "def transfer_data_to_cpu(data_map: dict) -> dict:\n \"\"\" Transfer tensors in data_map to cpu device.\n Will recursively walk through inner list, tuple and dict values.\n\n Args:\n data_map (dict): a dictionary which contains tensors to be transferred\n\n Returns:\n A dict which has same structure with input `data_map`.\n \"\"\"\n if not isinstance(data_map, dict):\n return data_map\n ret = OrderedDict()\n for key, value in data_map.items():\n if isinstance(value, torch.Tensor):\n ret[key] = value.detach().cpu()\n elif isinstance(value, dict):\n ret[key] = transfer_data_to_cpu(value)\n elif isinstance(value, (list, tuple)):\n ret[key] = type(value)([transfer_data_to_cpu(t) for t in value])\n else:\n ret[key] = value\n torch.cuda.empty_cache()\n return ret"
},
{
"identifier": "transfer_data_to_cuda",
"path": "scepter/modules/utils/data.py",
"snippet": "def transfer_data_to_cuda(data_map: dict) -> dict:\n \"\"\" Transfer tensors in data_map to current default gpu device.\n Will recursively walk through inner list, tuple and dict values.\n\n Args:\n data_map (dict): a dictionary which contains tensors to be transferred\n\n Returns:\n A dict which has same structure with input `data_map`.\n \"\"\"\n import platform\n if platform.system() == 'Darwin':\n return data_map\n if not isinstance(data_map, dict):\n return data_map\n ret = OrderedDict()\n for key, value in data_map.items():\n if isinstance(value, torch.Tensor):\n if value.is_cuda:\n ret[key] = value\n else:\n ret[key] = value.cuda(non_blocking=True)\n elif isinstance(value, dict):\n ret[key] = transfer_data_to_cuda(value)\n elif isinstance(value, (list, tuple)):\n ret[key] = type(value)([transfer_data_to_cuda(t) for t in value])\n else:\n ret[key] = value\n return ret"
},
{
"identifier": "gather_data",
"path": "scepter/modules/utils/distribute.py",
"snippet": " def set_random_seed(seed):\ndef get_dist_info():\ndef gather_data(data):\ndef gather_list(data):\ndef gather_picklable(data):\ndef _gather_picklable_custom(data):\ndef gather_gpu_tensors(tensor, all_recv=False, is_cat=True):\ndef broadcast(tensor, src, group=None, **kwargs):\ndef barrier():\ndef get_global_gloo_group():\ndef reduce_scatter(output,\n input_list,\n op=dist.ReduceOp.SUM,\n group=None,\n **kwargs):\ndef all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, **kwargs):\ndef _serialize_to_tensor(data):\ndef _unserialize_from_tensor(recv_data):\ndef send(tensor, dst, group=None, **kwargs):\ndef recv(tensor, src=None, group=None, **kwargs):\ndef isend(tensor, dst, group=None, **kwargs):\ndef irecv(tensor, src=None, group=None, **kwargs):\ndef scatter(data, scatter_list=None, src=0, group=None, **kwargs):\ndef shared_random_seed():\ndef mp_worker(gpu, ngpus_per_node, cfg, fn, pmi_rank, world_size, work_env):\n def __init__(self):\n def init_env(self, config, fn, logger=None):\n def get_env(self):\n def set_env(self, we_env):\n def __str__(self):\nclass Workenv(object):"
},
{
"identifier": "FS",
"path": "scepter/modules/utils/file_system.py",
"snippet": "FS = FileSystem()"
}
] | import os.path as osp
import torch
from collections import OrderedDict, defaultdict
from scepter.modules.solver.base_solver import BaseSolver
from scepter.modules.solver.registry import SOLVERS
from scepter.modules.utils.config import dict_to_yaml
from scepter.modules.utils.data import (transfer_data_to_cpu,
transfer_data_to_cuda)
from scepter.modules.utils.distribute import gather_data, we
from scepter.modules.utils.file_system import FS | 9,402 | # -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
def _get_value(data: dict, key: str):
    """ Recursively get value from data by a multi-level key.
    Args:
        data (dict):
        key (str): 'data', 'meta.path', 'a.b.c'
    Returns:
        Value.
    """
    if not isinstance(data, dict):
        return None
    if key in data:
        return data[key]
    elif '.' in key:
        par_key = key.split('.')[0]
        sub_key = '.'.join(key.split('.')[1:])
        if par_key in data:
            return _get_value(data[par_key], sub_key)
    return None
@SOLVERS.register_class()
| # -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
def _get_value(data: dict, key: str):
    """ Recursively get value from data by a multi-level key.
    Args:
        data (dict):
        key (str): 'data', 'meta.path', 'a.b.c'
    Returns:
        Value.
    """
    if not isinstance(data, dict):
        return None
    if key in data:
        return data[key]
    elif '.' in key:
        par_key = key.split('.')[0]
        sub_key = '.'.join(key.split('.')[1:])
        if par_key in data:
            return _get_value(data[par_key], sub_key)
    return None
@SOLVERS.register_class() | class TrainValSolver(BaseSolver): | 0 | 2023-12-21 02:01:48+00:00 | 12k |
pigeonai-org/ViDove | src/task.py | [
{
"identifier": "SrtScript",
"path": "src/srt_util/srt.py",
"snippet": "class SrtScript(object):\n def __init__(self, src_lang, tgt_lang, segments, domain=\"General\") -> None:\n self.domain = domain\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n self.segments = [SrtSegment(self.src_lang, self.tgt_lang, seg) for seg in segments]\n\n if self.domain != \"General\":\n if os.path.exists(f\"{dict_path}/{self.domain}\") and\\\n os.path.exists(f\"{dict_path}/{self.domain}/{src_lang}.csv\") and os.path.exists(f\"{dict_path}/{self.domain}/{tgt_lang}.csv\" ):\n # TODO: load dictionary\n self.dict = dict_util.term_dict(f\"{dict_path}/{self.domain}\", src_lang, tgt_lang)\n ...\n else:\n logging.error(f\"domain {self.domain} or related dictionary({src_lang} or {tgt_lang}) doesn't exist, fallback to general domain, this will disable correct_with_force_term and spell_check_term\")\n self.domain = \"General\"\n\n\n @classmethod\n def parse_from_srt_file(cls, src_lang, tgt_lang, domain, path = None, srt_str = None):\n if path is not None:\n with open(path, 'r', encoding=\"utf-8\") as f:\n script_lines = [line.rstrip() for line in f.readlines()]\n elif srt_str is not None:\n script_lines = srt_str.splitlines()\n else:\n raise RuntimeError(\"need input Srt Path or Srt String\")\n\n bilingual = False\n if script_lines[2] != '' and script_lines[3] != '':\n bilingual = True\n segments = []\n if bilingual:\n for i in range(0, len(script_lines), 5):\n segments.append(list(script_lines[i:i + 5]))\n else:\n for i in range(0, len(script_lines), 4):\n segments.append(list(script_lines[i:i + 4]))\n return cls(src_lang, tgt_lang, segments, domain)\n\n def merge_segs(self, idx_list) -> SrtSegment:\n \"\"\"\n Merge entire segment list to a single segment\n :param idx_list: List of index to merge\n :return: Merged list\n \"\"\"\n if not idx_list:\n raise NotImplementedError('Empty idx_list')\n seg_result = deepcopy(self.segments[idx_list[0]])\n if len(idx_list) == 1:\n return seg_result\n\n for idx in range(1, len(idx_list)):\n seg_result += self.segments[idx_list[idx]]\n\n return seg_result\n\n def form_whole_sentence(self):\n \"\"\"\n Concatenate or Strip sentences and reconstruct segments list. This is because of\n improper segmentation from openai-whisper.\n :return: None\n \"\"\"\n logging.info(\"Forming whole sentences...\")\n merge_list = [] # a list of indices that should be merged e.g. [[0], [1, 2, 3, 4], [5, 6], [7]]\n sentence = []\n ending_puncs = punctuation_dict[self.src_lang][\"sentence_end\"]\n # Get each entire sentence of distinct segments, fill indices to merge_list\n for i, seg in enumerate(self.segments):\n if seg.source_text[-1] in ending_puncs and len(seg.source_text) > 10 and 'vs.' 
not in seg.source_text:\n sentence.append(i)\n merge_list.append(sentence)\n sentence = []\n else:\n sentence.append(i)\n\n # Reconstruct segments, each with an entire sentence\n segments = []\n for idx_list in merge_list:\n if len(idx_list) > 1:\n logging.info(\"merging segments: %s\", idx_list)\n segments.append(self.merge_segs(idx_list))\n\n self.segments = segments\n\n def remove_trans_punctuation(self):\n \"\"\"\n Post-process: remove all punc after translation and split\n :return: None\n \"\"\"\n for i, seg in enumerate(self.segments):\n seg.remove_trans_punc()\n logging.info(\"Removed punctuation in translation.\")\n\n def set_translation(self, translate: str, id_range: tuple, model, video_name, video_link=None):\n start_seg_id = id_range[0]\n end_seg_id = id_range[1]\n\n src_text = \"\"\n for i, seg in enumerate(self.segments[start_seg_id - 1:end_seg_id]):\n src_text += seg.source_text\n src_text += '\\n\\n'\n\n def inner_func(target, input_str):\n # handling merge sentences issue.\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=[\n {\"role\": \"system\",\n \"content\": \"Your task is to merge or split sentences into a specified number of lines as required. You need to ensure the meaning of the sentences as much as possible, but when necessary, a sentence can be divided into two lines for output\"},\n {\"role\": \"system\", \"content\": \"Note: You only need to output the processed {} sentences. If you need to output a sequence number, please separate it with a colon.\".format(self.tgt_lang)},\n {\"role\": \"user\", \"content\": 'Please split or combine the following sentences into {} sentences:\\n{}'.format(target, input_str)}\n ],\n temperature=0.15\n )\n return response['choices'][0]['message']['content'].strip()\n\n # handling merge sentences issue.\n lines = translate.split('\\n\\n')\n if len(lines) < (end_seg_id - start_seg_id + 1):\n count = 0\n solved = True\n while count < 5 and len(lines) != (end_seg_id - start_seg_id + 1):\n count += 1\n print(\"Solving Unmatched Lines|iteration {}\".format(count))\n logging.error(\"Solving Unmatched Lines|iteration {}\".format(count))\n\n flag = True\n while flag:\n flag = False\n try:\n translate = inner_func(end_seg_id - start_seg_id + 1, translate)\n except Exception as e:\n print(\"An error has occurred during solving unmatched lines:\", e)\n print(\"Retrying...\")\n logging.error(\"An error has occurred during solving unmatched lines:\", e)\n logging.error(\"Retrying...\")\n flag = True\n lines = translate.split('\\n')\n\n if len(lines) < (end_seg_id - start_seg_id + 1):\n solved = False\n print(\"Failed Solving unmatched lines, Manually parse needed\")\n logging.error(\"Failed Solving unmatched lines, Manually parse needed\")\n\n # FIXME: put the error log in our log file\n if not os.path.exists(\"./logs\"):\n os.mkdir(\"./logs\")\n if video_link:\n log_file = \"./logs/log_link.csv\"\n log_exist = os.path.exists(log_file)\n with open(log_file, \"a\") as log:\n if not log_exist:\n log.write(\"range_of_text,iterations_solving,solved,file_length,video_link\" + \"\\n\")\n log.write(str(id_range) + ',' + str(count) + ',' + str(solved) + ',' + str(\n len(self.segments)) + ',' + video_link + \"\\n\")\n else:\n log_file = \"./logs/log_name.csv\"\n log_exist = os.path.exists(log_file)\n with open(log_file, \"a\") as log:\n if not log_exist:\n log.write(\"range_of_text,iterations_solving,solved,file_length,video_name\" + \"\\n\")\n log.write(str(id_range) + ',' + str(count) + ',' + str(solved) + ',' + str(\n 
len(self.segments)) + ',' + video_name + \"\\n\")\n # print(lines)\n\n for i, seg in enumerate(self.segments[start_seg_id - 1:end_seg_id]):\n # naive way to due with merge translation problem\n # TODO: need a smarter solution\n\n if i < len(lines):\n if \"Note:\" in lines[i]: # to avoid note\n lines.remove(lines[i])\n max_num -= 1\n if i == len(lines) - 1:\n break\n if lines[i][0] in [' ', '\\n']:\n lines[i] = lines[i][1:]\n seg.translation = lines[i]\n\n def split_seg(self, seg, text_threshold, time_threshold):\n # evenly split seg to 2 parts and add new seg into self.segments\n # ignore the initial comma to solve the recursion problem\n src_comma_str = punctuation_dict[self.src_lang][\"comma\"]\n tgt_comma_str = punctuation_dict[self.tgt_lang][\"comma\"]\n\n if len(seg.source_text) > 2:\n if seg.source_text[:2] == src_comma_str:\n seg.source_text = seg.source_text[2:]\n if seg.translation[0] == tgt_comma_str:\n seg.translation = seg.translation[1:]\n\n source_text = seg.source_text\n translation = seg.translation\n\n # split the text based on commas\n src_commas = [m.start() for m in re.finditer(src_comma_str, source_text)]\n trans_commas = [m.start() for m in re.finditer(tgt_comma_str, translation)]\n if len(src_commas) != 0:\n src_split_idx = src_commas[len(src_commas) // 2] if len(src_commas) % 2 == 1 else src_commas[\n len(src_commas) // 2 - 1]\n else:\n # split the text based on spaces\n src_space = [m.start() for m in re.finditer(' ', source_text)]\n if len(src_space) > 0:\n src_split_idx = src_space[len(src_space) // 2] if len(src_space) % 2 == 1 else src_space[\n len(src_space) // 2 - 1]\n else:\n src_split_idx = 0\n\n if len(trans_commas) != 0:\n trans_split_idx = trans_commas[len(trans_commas) // 2] if len(trans_commas) % 2 == 1 else trans_commas[\n len(trans_commas) // 2 - 1]\n else:\n trans_split_idx = len(translation) // 2\n\n # to avoid split English word\n for i in range(trans_split_idx, len(translation)):\n if not translation[i].encode('utf-8').isalpha():\n trans_split_idx = i\n break\n\n # split the time duration based on text length\n time_split_ratio = trans_split_idx / (len(seg.translation) - 1)\n\n src_seg1 = source_text[:src_split_idx]\n src_seg2 = source_text[src_split_idx:]\n trans_seg1 = translation[:trans_split_idx]\n trans_seg2 = translation[trans_split_idx:]\n\n start_seg1 = seg.start\n end_seg1 = start_seg2 = seg.start + (seg.end - seg.start) * time_split_ratio\n end_seg2 = seg.end\n\n seg1_dict = {}\n seg1_dict['text'] = src_seg1\n seg1_dict['start'] = start_seg1\n seg1_dict['end'] = end_seg1\n seg1 = SrtSegment(self.src_lang, self.tgt_lang, seg1_dict)\n seg1.translation = trans_seg1\n\n seg2_dict = {}\n seg2_dict['text'] = src_seg2\n seg2_dict['start'] = start_seg2\n seg2_dict['end'] = end_seg2\n seg2 = SrtSegment(self.src_lang, self.tgt_lang, seg2_dict)\n seg2.translation = trans_seg2\n\n result_list = []\n if len(seg1.translation) > text_threshold and (seg1.end - seg1.start) > time_threshold:\n result_list += self.split_seg(seg1, text_threshold, time_threshold)\n else:\n result_list.append(seg1)\n\n if len(seg2.translation) > text_threshold and (seg2.end - seg2.start) > time_threshold:\n result_list += self.split_seg(seg2, text_threshold, time_threshold)\n else:\n result_list.append(seg2)\n\n return result_list\n\n def check_len_and_split(self, text_threshold=30, time_threshold=1.0):\n # if sentence length >= threshold and sentence duration > time_threshold, split this segments to two\n logging.info(\"performing check_len_and_split\")\n segments = []\n 
for i, seg in enumerate(self.segments):\n if len(seg.translation) > text_threshold and (seg.end - seg.start) > time_threshold:\n seg_list = self.split_seg(seg, text_threshold, time_threshold)\n logging.info(\"splitting segment {} in to {} parts\".format(i + 1, len(seg_list)))\n segments += seg_list\n else:\n segments.append(seg)\n\n self.segments = segments\n logging.info(\"check_len_and_split finished\")\n\n def check_len_and_split_range(self, range, text_threshold=30, time_threshold=1.0):\n # DEPRECATED\n # if sentence length >= text_threshold, split this segments to two\n start_seg_id = range[0]\n end_seg_id = range[1]\n extra_len = 0\n segments = []\n for i, seg in enumerate(self.segments[start_seg_id - 1:end_seg_id]):\n if len(seg.translation) > text_threshold and (seg.end - seg.start) > time_threshold:\n seg_list = self.split_seg(seg, text_threshold, time_threshold)\n segments += seg_list\n extra_len += len(seg_list) - 1\n else:\n segments.append(seg)\n\n self.segments[start_seg_id - 1:end_seg_id] = segments\n return extra_len\n\n def correct_with_force_term(self):\n ## force term correction\n logging.info(\"performing force term correction\")\n\n # check domain\n if self.domain == \"General\":\n logging.info(\"General domain could not perform correct_with_force_term. skip this step.\")\n pass\n else:\n keywords = list(self.dict.keys())\n keywords.sort(key=lambda x: len(x), reverse=True)\n\n for word in keywords:\n for i, seg in enumerate(self.segments):\n if word in seg.source_text.lower():\n seg.source_text = re.sub(fr\"({word}es|{word}s?)\\b\", \"{}\".format(self.dict.get(word)),\n seg.source_text, flags=re.IGNORECASE)\n logging.info(\n \"replace term: \" + word + \" --> \" + self.dict.get(word) + \" in time stamp {}\".format(\n i + 1))\n logging.info(\"source text becomes: \" + seg.source_text)\n\n\n def fetchfunc(self, word, threshold):\n import enchant\n result = word\n distance = 0\n threshold = threshold * len(word)\n temp = \"\"\n for matched in self.dict:\n if (\" \" in matched and \" \" in word) or (\" \" not in matched and \" \" not in word):\n if enchant.utils.levenshtein(word, matched) < enchant.utils.levenshtein(word, temp):\n temp = matched\n if enchant.utils.levenshtein(word, temp) < threshold:\n distance = enchant.utils.levenshtein(word, temp)\n result = temp\n return distance, result\n\n def extract_words(self, sentence, n):\n # this function split the sentence to chunks by n of words\n # e.g. sentence: \"this, is a sentence\", n = 2\n # result: [\"this,\", \"is\", \"a\", [\"sentence\"], [\"this,\", \"is\"], \"is a\", \"a sentence\"]\n words = sentence.split()\n res = []\n for j in range(n, 0, -1):\n res += [words[i:i + j] for i in range(len(words) - j + 1)]\n return res\n\n def spell_check_term(self):\n logging.info(\"performing spell check\")\n\n # check domain\n if self.domain == \"General\":\n logging.info(\"General domain could not perform spell_check_term. 
skip this step.\")\n pass\n\n import enchant\n dict = enchant.Dict('en_US')\n\n for seg in tqdm(self.segments):\n ready_words = self.extract_words(seg.source_text, 2)\n for i in range(len(ready_words)):\n word_list = ready_words[i]\n word, real_word, pos = self.get_real_word(word_list)\n if not dict.check(real_word) and (real_word not in self.dict.keys()):\n distance, correct_term = self.fetchfunc(real_word, 0.3)\n if distance != 0:\n seg.source_text = re.sub(word[:pos], correct_term, seg.source_text, flags=re.IGNORECASE)\n logging.info(\n \"replace: \" + word[:pos] + \" to \" + correct_term + \"\\t distance = \" + str(distance))\n\n def get_real_word(self, word_list: list):\n word = \"\"\n for w in word_list:\n word += f\"{w} \"\n word = word[:-1] # \"this, is\"\n if word[-2:] == \".\\n\":\n real_word = word[:-2].lower()\n n = -2\n elif word[-1:] in [\".\", \"\\n\", \",\", \"!\", \"?\"]:\n real_word = word[:-1].lower()\n n = -1\n else:\n real_word = word.lower()\n n = 0\n return word, real_word, len(word) + n\n\n ## WRITE AND READ FUNCTIONS ##\n\n def get_source_only(self):\n # return a string with pure source text\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{seg.source_text}\\n\\n\\n' # f'SENTENCE {i+1}: {seg.source_text}\\n\\n\\n'\n\n return result\n\n def reform_src_str(self):\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{i + 1}\\n'\n result += str(seg)\n return result\n\n def reform_trans_str(self):\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{i + 1}\\n'\n result += seg.get_trans_str()\n return result\n\n def form_bilingual_str(self):\n result = \"\"\n for i, seg in enumerate(self.segments):\n result += f'{i + 1}\\n'\n result += seg.get_bilingual_str()\n return result\n\n def write_srt_file_src(self, path: str):\n # write srt file to path\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(self.reform_src_str())\n pass\n\n def write_srt_file_translate(self, path: str):\n logging.info(\"writing to \" + path)\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(self.reform_trans_str())\n pass\n\n def write_srt_file_bilingual(self, path: str):\n logging.info(\"writing to \" + path)\n with open(path, \"w\", encoding='utf-8') as f:\n f.write(self.form_bilingual_str())\n pass\n\n def realtime_write_srt(self, path, range, length, idx):\n # DEPRECATED\n start_seg_id = range[0]\n end_seg_id = range[1]\n with open(path, \"a\", encoding='utf-8') as f:\n # for i, seg in enumerate(self.segments[start_seg_id-1:end_seg_id+length]):\n # f.write(f'{i+idx}\\n')\n # f.write(seg.get_trans_str())\n for i, seg in enumerate(self.segments):\n if i < range[0] - 1: continue\n if i >= range[1] + length: break\n f.write(f'{i + idx}\\n')\n f.write(seg.get_trans_str())\n pass\n\n def realtime_bilingual_write_srt(self, path, range, length, idx):\n # DEPRECATED\n start_seg_id = range[0]\n end_seg_id = range[1]\n with open(path, \"a\", encoding='utf-8') as f:\n for i, seg in enumerate(self.segments):\n if i < range[0] - 1: continue\n if i >= range[1] + length: break\n f.write(f'{i + idx}\\n')\n f.write(seg.get_bilingual_str())\n pass"
},
{
"identifier": "srt2ass",
"path": "src/srt_util/srt2ass.py",
"snippet": "def srt2ass(input_file, sub_style, is_split, split_method):\n if '.ass' in input_file:\n return input_file\n\n if not os.path.isfile(input_file):\n print(input_file + ' not exist')\n return\n\n src = fileopen(input_file)\n srt_content = src[0]\n # encoding = src[1] # Will not encode so do not need to pass codec para\n src = ''\n utf8bom = ''\n\n if u'\\ufeff' in srt_content:\n srt_content = srt_content.replace(u'\\ufeff', '')\n utf8bom = u'\\ufeff'\n \n srt_content = srt_content.replace(\"\\r\", \"\")\n lines = [x.strip() for x in srt_content.split(\"\\n\") if x.strip()]\n subLines = ''\n dlgLines = '' # dialogue line\n lineCount = 0\n output_file = '.'.join(input_file.split('.')[:-1])\n output_file += '.ass'\n\n for ln in range(len(lines)):\n line = lines[ln]\n if line.isdigit() and re.match('-?\\d\\d:\\d\\d:\\d\\d', lines[(ln+1)]):\n if dlgLines:\n subLines += dlgLines + \"\\n\"\n dlgLines = ''\n lineCount = 0\n continue\n else:\n if re.match('-?\\d\\d:\\d\\d:\\d\\d', line):\n line = line.replace('-0', '0')\n if sub_style =='default':\n dlgLines += 'Dialogue: 0,' + line + ',default,,0,0,0,,'\n elif sub_style =='ikedaCN':\n dlgLines += 'Dialogue: 0,' + line + ',池田字幕1080p,,0,0,0,,'\n elif sub_style == 'sugawaraCN':\n dlgLines += 'Dialogue: 0,' + line + ',中字 1080P,,0,0,0,,'\n elif sub_style == 'kaedeCN':\n dlgLines += 'Dialogue: 0,' + line + ',den SR红色,,0,0,0,,'\n elif sub_style == 'taniguchiCN':\n dlgLines += 'Dialogue: 0,' + line + ',正文_1080P,,0,0,0,,'\n elif sub_style == 'asukaCN':\n dlgLines += 'Dialogue: 0,' + line + ',DEFAULT1,,0,0,0,,'\n elif sub_style == 'starPigeon':\n dlgLines += 'Dialogue: 0,' + line + ',Starcraft 2 下(一般字幕),,0,0,0,,'\n else:\n if lineCount < 2:\n dlg_string = line\n if is_split == \"Yes\" and split_method == 'Modest':\n # do not split if space proceed and followed by non-ASC-II characters\n # do not split if space followed by word that less than 5 characters\n split_string = re.sub(r'(?<=[^\\x00-\\x7F])\\s+(?=[^\\x00-\\x7F])(?=\\w{5})', r'|', dlg_string)\n # print(split_string)\n if len(split_string.split('|')) > 1:\n dlgLines += (split_string.replace('|', \"(adjust_required)\\n\" + dlgLines)) + \"(adjust_required)\"\n else:\n dlgLines += line\n elif is_split == \"Yes\" and split_method == 'Aggressive':\n # do not split if space proceed and followed by non-ASC-II characters\n # split at all the rest spaces\n split_string = re.sub(r'(?<=[^\\x00-\\x7F])\\s+(?=[^\\x00-\\x7F])', r'|', dlg_string)\n if len(split_string.split('|')) > 1:\n dlgLines += (split_string.replace('|',\"(adjust_required)\\n\" + dlgLines)) + \"(adjust_required)\"\n else:\n dlgLines += line\n else:\n dlgLines += line\n else:\n dlgLines += \"\\n\" + line\n lineCount += 1\n ln += 1\n\n\n subLines += dlgLines + \"\\n\"\n\n subLines = re.sub(r'\\d(\\d:\\d{2}:\\d{2}),(\\d{2})\\d', '\\\\1.\\\\2', subLines)\n subLines = re.sub(r'\\s+-->\\s+', ',', subLines)\n # replace style\n # subLines = re.sub(r'<([ubi])>', \"{\\\\\\\\\\g<1>1}\", subLines)\n # subLines = re.sub(r'</([ubi])>', \"{\\\\\\\\\\g<1>0}\", subLines)\n # subLines = re.sub(r'<font\\s+color=\"?#(\\w{2})(\\w{2})(\\w{2})\"?>', \"{\\\\\\\\c&H\\\\3\\\\2\\\\1&}\", subLines)\n # subLines = re.sub(r'</font>', \"\", subLines)\n\n if sub_style == 'default':\n head_name = 'head_str_default'\n elif sub_style == 'ikedaCN':\n head_name = 'head_str_ikeda'\n elif sub_style == 'sugawaraCN':\n head_name = 'head_str_sugawara'\n elif sub_style == 'kaedeCN':\n head_name = 'head_str_kaede'\n elif sub_style == \"taniguchiCN\":\n head_name = 
'head_str_taniguchi'\n elif sub_style == 'asukaCN':\n head_name = 'head_str_asuka'\n elif sub_style == 'starPigeon':\n head_name = 'head_str_pigeon'\n\n head_str = STYLE_DICT.get(head_name)\n output_str = utf8bom + head_str + '\\n' + subLines\n # encode again for head string\n output_str = output_str.encode('utf8')\n\n with open(output_file, 'wb') as output:\n output.write(output_str)\n\n output_file = output_file.replace('\\\\', '\\\\\\\\')\n output_file = output_file.replace('/', '//')\n return output_file"
},
{
"identifier": "get_translation",
"path": "src/translators/translation.py",
"snippet": "def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000):\n # print(srt.get_source_only())\n script_arr, range_arr = split_script(srt.get_source_only(),chunk_size)\n translate(srt, script_arr, range_arr, model, video_name, task=prompt)\n pass"
},
{
"identifier": "prompt_selector",
"path": "src/translators/translation.py",
"snippet": "def prompt_selector(src_lang, tgt_lang, domain):\n language_map = {\n \"EN\": \"English\",\n \"ZH\": \"Chinese\",\n \"ES\": \"Spanish\",\n \"FR\": \"France\",\n \"DE\": \"Germany\",\n \"RU\": \"Russian\",\n \"JA\": \"Japanese\",\n \"AR\": \"Arabic\",\n }\n try:\n src_lang = language_map[src_lang]\n tgt_lang = language_map[tgt_lang]\n except:\n print(\"Unsupported language, is your abbreviation correct?\")\n logging.info(\"Unsupported language detected\")\n prompt = f\"\"\"\n you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang}, \n you will be provided with a segement in {src_lang} parsed by line, where your translation text should keep the original \n meaning and the number of lines.\n \"\"\"\n return prompt"
}
] | import threading
import time
import openai
import logging
import subprocess
import torch
import stable_whisper
import shutil
from pytube import YouTube
from os import getenv, getcwd
from pathlib import Path
from enum import Enum, auto
from src.srt_util.srt import SrtScript
from src.srt_util.srt2ass import srt2ass
from time import time, strftime, gmtime, sleep
from src.translators.translation import get_translation, prompt_selector
from datetime import datetime | 8,492 | self.result = None
self.s_t = None
self.t_e = None
self.t_s = time()
# logging setting
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt, handlers=[
logging.FileHandler(
"{}/{}_{}.log".format(task_local_dir, f"task_{task_id}", datetime.now().strftime("%m%d%Y_%H%M%S")),
'w', encoding='utf-8')])
print(f"Task ID: {self.task_id}")
logging.info(f"Task ID: {self.task_id}")
logging.info(f"{self.source_lang} -> {self.target_lang} task in {self.field}")
logging.info(f"Translation Model: {self.translation_model}")
logging.info(f"subtitle_type: {self.output_type['subtitle']}")
logging.info(f"video_ouput: {self.output_type['video']}")
logging.info(f"bilingual_ouput: {self.output_type['bilingual']}")
logging.info("Pre-process setting:")
for key in self.pre_setting:
logging.info(f"{key}: {self.pre_setting[key]}")
logging.info("Post-process setting:")
for key in self.post_setting:
logging.info(f"{key}: {self.post_setting[key]}")
@staticmethod
def fromYoutubeLink(youtube_url, task_id, task_dir, task_cfg):
"""
Creates a YoutubeTask instance from a YouTube URL.
"""
return YoutubeTask(task_id, task_dir, task_cfg, youtube_url)
@staticmethod
def fromAudioFile(audio_path, task_id, task_dir, task_cfg):
"""
Creates an AudioTask instance from an audio file path.
"""
return AudioTask(task_id, task_dir, task_cfg, audio_path)
@staticmethod
def fromVideoFile(video_path, task_id, task_dir, task_cfg):
"""
Creates a VideoTask instance from a video file path.
"""
return VideoTask(task_id, task_dir, task_cfg, video_path)
@staticmethod
def fromSRTFile(srt_path, task_id, task_dir, task_cfg):
"""
Creates a SRTTask instance from a srt file path.
"""
return SRTTask(task_id, task_dir, task_cfg, srt_path)
# Module 1 ASR: audio --> SRT_script
def get_srt_class(self):
"""
Handles the ASR module to convert audio to SRT script format.
"""
# Instead of using the script_en variable directly, we'll use script_input
# TODO: setup ASR module like translator
self.status = TaskStatus.INITIALIZING_ASR
if self.SRT_Script != None:
logging.info("SRT input mode, skip ASR Module")
return
method = self.ASR_setting["whisper_config"]["method"]
whisper_model = self.ASR_setting["whisper_config"]["whisper_model"]
src_srt_path = self.task_local_dir.joinpath(f"task_{self.task_id}_{self.source_lang}.srt")
if not Path.exists(src_srt_path):
# extract script from audio
logging.info("extract script from audio")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Module 1: ASR inference method: {method}")
init_prompt = "Hello, welcome to my lecture." if self.source_lang == "EN" else ""
if method == "api":
with open(self.audio_path, 'rb') as audio_file:
transcript = openai.Audio.transcribe(model="whisper-1", file=audio_file, response_format="srt", language=self.source_lang.lower(), prompt=init_prompt)
elif method == "stable":
model = stable_whisper.load_model(whisper_model, device)
transcript = model.transcribe(str(self.audio_path), regroup=False,
initial_prompt=init_prompt)
(
transcript
.split_by_punctuation(['.', '。', '?'])
.merge_by_gap(.15, max_words=3)
.merge_by_punctuation([' '])
.split_by_punctuation(['.', '。', '?'])
)
transcript = transcript.to_dict()
transcript = transcript['segments']
# after get the transcript, release the gpu resource
torch.cuda.empty_cache()
else:
raise RuntimeError(f"unavaliable ASR inference method: {method}")
if isinstance(transcript, str):
self.SRT_Script = SrtScript.parse_from_srt_file(self.source_lang, self.target_lang, domain = self.field, srt_str = transcript.rstrip())
else:
self.SRT_Script = SrtScript(self.source_lang, self.target_lang, transcript, self.field)
# save the srt script to local
self.SRT_Script.write_srt_file_src(src_srt_path)
# Module 2: SRT preprocess: perform preprocess steps
def preprocess(self):
"""
Performs preprocessing steps on the SRT script.
"""
self.status = TaskStatus.PRE_PROCESSING
logging.info("--------------------Start Preprocessing SRT class--------------------")
if self.pre_setting["sentence_form"]:
self.SRT_Script.form_whole_sentence()
if self.pre_setting["spell_check"]:
self.SRT_Script.spell_check_term()
if self.pre_setting["term_correct"]:
self.SRT_Script.correct_with_force_term()
processed_srt_path_src = str(Path(self.task_local_dir) / f'{self.task_id}_processed.srt')
self.SRT_Script.write_srt_file_src(processed_srt_path_src)
if self.output_type["subtitle"] == "ass":
logging.info("write English .srt file to .ass")
|
class TaskStatus(str, Enum):
"""
An enumeration class representing the different statuses a task can have in the translation pipeline.
TODO: add translation progress indicator (%).
"""
CREATED = 'CREATED'
INITIALIZING_ASR = 'INITIALIZING_ASR'
PRE_PROCESSING = 'PRE_PROCESSING'
TRANSLATING = 'TRANSLATING'
POST_PROCESSING = 'POST_PROCESSING'
OUTPUT_MODULE = 'OUTPUT_MODULE'
class Task:
"""
A class representing a task in the translation pipeline. It includes methods for handling different stages of the task.
If one want to add a new entry type (e.g. add support for different video formats),
one should extend this class and override the `run` method.
"""
@property
def status(self):
with self.__status_lock:
return self.__status
@status.setter
def status(self, new_status):
"""
Sets the new status of the task, ensuring thread safety with a lock.
"""
with self.__status_lock:
self.__status = new_status
def __init__(self, task_id, task_local_dir, task_cfg):
"""
Constructor for initializing a task with its ID, local directory, and configuration settings.
"""
self.__status_lock = threading.Lock()
self.__status = TaskStatus.CREATED
self.gpu_status = 0
openai.api_key = getenv("OPENAI_API_KEY")
self.task_id = task_id
self.task_local_dir = task_local_dir
self.ASR_setting = task_cfg["ASR"]
self.translation_setting = task_cfg["translation"]
self.translation_model = self.translation_setting["model"]
self.output_type = task_cfg["output_type"]
self.target_lang = task_cfg["target_lang"]
self.source_lang = task_cfg["source_lang"]
self.field = task_cfg["field"]
self.pre_setting = task_cfg["pre_process"]
self.post_setting = task_cfg["post_process"]
self.audio_path = None
self.SRT_Script = None
self.result = None
self.s_t = None
self.t_e = None
self.t_s = time()
# logging setting
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt, handlers=[
logging.FileHandler(
"{}/{}_{}.log".format(task_local_dir, f"task_{task_id}", datetime.now().strftime("%m%d%Y_%H%M%S")),
'w', encoding='utf-8')])
print(f"Task ID: {self.task_id}")
logging.info(f"Task ID: {self.task_id}")
logging.info(f"{self.source_lang} -> {self.target_lang} task in {self.field}")
logging.info(f"Translation Model: {self.translation_model}")
logging.info(f"subtitle_type: {self.output_type['subtitle']}")
logging.info(f"video_ouput: {self.output_type['video']}")
logging.info(f"bilingual_ouput: {self.output_type['bilingual']}")
logging.info("Pre-process setting:")
for key in self.pre_setting:
logging.info(f"{key}: {self.pre_setting[key]}")
logging.info("Post-process setting:")
for key in self.post_setting:
logging.info(f"{key}: {self.post_setting[key]}")
@staticmethod
def fromYoutubeLink(youtube_url, task_id, task_dir, task_cfg):
"""
Creates a YoutubeTask instance from a YouTube URL.
"""
return YoutubeTask(task_id, task_dir, task_cfg, youtube_url)
@staticmethod
def fromAudioFile(audio_path, task_id, task_dir, task_cfg):
"""
Creates an AudioTask instance from an audio file path.
"""
return AudioTask(task_id, task_dir, task_cfg, audio_path)
@staticmethod
def fromVideoFile(video_path, task_id, task_dir, task_cfg):
"""
Creates a VideoTask instance from a video file path.
"""
return VideoTask(task_id, task_dir, task_cfg, video_path)
@staticmethod
def fromSRTFile(srt_path, task_id, task_dir, task_cfg):
"""
Creates a SRTTask instance from a srt file path.
"""
return SRTTask(task_id, task_dir, task_cfg, srt_path)
# Module 1 ASR: audio --> SRT_script
def get_srt_class(self):
"""
Handles the ASR module to convert audio to SRT script format.
"""
# Instead of using the script_en variable directly, we'll use script_input
# TODO: setup ASR module like translator
self.status = TaskStatus.INITIALIZING_ASR
if self.SRT_Script != None:
logging.info("SRT input mode, skip ASR Module")
return
method = self.ASR_setting["whisper_config"]["method"]
whisper_model = self.ASR_setting["whisper_config"]["whisper_model"]
src_srt_path = self.task_local_dir.joinpath(f"task_{self.task_id}_{self.source_lang}.srt")
if not Path.exists(src_srt_path):
# extract script from audio
logging.info("extract script from audio")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Module 1: ASR inference method: {method}")
init_prompt = "Hello, welcome to my lecture." if self.source_lang == "EN" else ""
if method == "api":
with open(self.audio_path, 'rb') as audio_file:
transcript = openai.Audio.transcribe(model="whisper-1", file=audio_file, response_format="srt", language=self.source_lang.lower(), prompt=init_prompt)
elif method == "stable":
model = stable_whisper.load_model(whisper_model, device)
transcript = model.transcribe(str(self.audio_path), regroup=False,
initial_prompt=init_prompt)
(
transcript
.split_by_punctuation(['.', '。', '?'])
.merge_by_gap(.15, max_words=3)
.merge_by_punctuation([' '])
.split_by_punctuation(['.', '。', '?'])
)
transcript = transcript.to_dict()
transcript = transcript['segments']
# after get the transcript, release the gpu resource
torch.cuda.empty_cache()
else:
raise RuntimeError(f"unavaliable ASR inference method: {method}")
if isinstance(transcript, str):
self.SRT_Script = SrtScript.parse_from_srt_file(self.source_lang, self.target_lang, domain = self.field, srt_str = transcript.rstrip())
else:
self.SRT_Script = SrtScript(self.source_lang, self.target_lang, transcript, self.field)
# save the srt script to local
self.SRT_Script.write_srt_file_src(src_srt_path)
# Module 2: SRT preprocess: perform preprocess steps
def preprocess(self):
"""
Performs preprocessing steps on the SRT script.
"""
self.status = TaskStatus.PRE_PROCESSING
logging.info("--------------------Start Preprocessing SRT class--------------------")
if self.pre_setting["sentence_form"]:
self.SRT_Script.form_whole_sentence()
if self.pre_setting["spell_check"]:
self.SRT_Script.spell_check_term()
if self.pre_setting["term_correct"]:
self.SRT_Script.correct_with_force_term()
processed_srt_path_src = str(Path(self.task_local_dir) / f'{self.task_id}_processed.srt')
self.SRT_Script.write_srt_file_src(processed_srt_path_src)
if self.output_type["subtitle"] == "ass":
logging.info("write English .srt file to .ass") | assSub_src = srt2ass(processed_srt_path_src, "default", "No", "Modest") | 1 | 2023-12-20 01:46:47+00:00 | 12k |
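Illustrative sketch for the ViDove row that ends here (not part of the row's data): the gold next line converts the preprocessed source subtitle to .ass via srt2ass. A minimal sketch of that single step is below; the file path is a hypothetical placeholder, and the argument order follows the srt2ass(input_file, sub_style, is_split, split_method) signature quoted in the row's context.

# Sketch of the subtitle-conversion step targeted by this row's next_line.
from src.srt_util.srt2ass import srt2ass  # module path as listed in the row's context

processed_srt_path_src = "local_dump/task_demo/demo_processed.srt"  # hypothetical path
ass_path = srt2ass(processed_srt_path_src, "default", "No", "Modest")
print(ass_path)  # srt2ass returns the path of the generated .ass file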
YyzHarry/shortcut-ood-fairness | train.py | [
{
"identifier": "datasets",
"path": "dataset/datasets.py",
"snippet": "DATASETS = [\n 'MIMIC',\n 'CheXpert',\n 'NIH',\n 'PadChest',\n 'VinDr',\n 'SIIM',\n 'ISIC',\n 'ODIR'\n]\nCXR_DATASETS = [\n 'MIMIC',\n 'CheXpert',\n 'NIH',\n 'PadChest',\n 'VinDr',\n 'SIIM'\n]\nATTRS = ['sex', 'ethnicity', 'age', 'sex_ethnicity']\nTASKS = ['No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',\n 'Cataract', 'Retinopathy']\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 8 # Default, subclasses may override\n INPUT_SHAPE = None # Subclasses should override\n AVAILABLE_ATTRS = None # Subclasses should override\n SPLITS = { # Default, subclasses may override\n 'tr': 0,\n 'va': 1,\n 'te': 2\n }\n EVAL_SPLITS = ['te'] # Default, subclasses may override\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age', 'ethnicity', 'sex_ethnicity']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age', 'ethnicity', 'sex_ethnicity']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'\n ]\n SPLITS = {\n 'te': 2\n }\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'Pneumothorax'\n ]\n SPLITS = {\n 'te': 2\n }\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding'\n ]\n N_STEPS = 30001\n CHECKPOINT_FREQ = 1000\n N_WORKERS = 16\n INPUT_SHAPE = (3, 224, 224,)\n AVAILABLE_ATTRS = ['sex', 'age']\n TASKS = [\n 'No Finding', 'Cataract', 'Retinopathy'\n ]\ndef get_dataset_class(dataset_name):\ndef num_environments(dataset_name):\n def __init__(self, root, split, metadata, transform, group_def='group', subsample_type=None, duplicates=None, subset_query=None):\n def _count_groups(self):\n def subsample(self, subsample_type):\n def duplicate(self, duplicates):\n def __getitem__(self, index):\n def __len__(self):\n def __init__(self, metadata, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def transform(self, x):\n def __init__(self, dss):\n def __getitem__(self, idx):\n def __len__(self):\n def __init__(self, ds, idxs):\n def __getitem__(self, 
idx):\n def __len__(self):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\n def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):\nclass SubpopDataset:\nclass BaseImageDataset(SubpopDataset):\nclass ConcatImageDataset(SubpopDataset):\nclass SubsetImageDataset(SubpopDataset):\nclass MIMIC(BaseImageDataset):\nclass CheXpert(BaseImageDataset):\nclass NIH(BaseImageDataset):\nclass PadChest(BaseImageDataset):\nclass VinDr(BaseImageDataset):\nclass SIIM(BaseImageDataset):\nclass ISIC(BaseImageDataset):\nclass ODIR(BaseImageDataset):"
},
{
"identifier": "algorithms",
"path": "learning/algorithms.py",
"snippet": "ALGORITHMS = [\n 'ERM',\n 'StratifiedERM',\n # subgroup methods\n 'GroupDRO',\n 'IRM',\n 'CVaRDRO',\n 'JTT',\n 'LISA',\n 'DFR',\n # data augmentation\n 'Mixup',\n # domain generalization methods\n 'MMD',\n 'CORAL',\n 'DANN',\n 'CDANN',\n # imbalanced learning methods\n 'ReSample',\n 'ReWeight',\n 'SqrtReWeight',\n 'CBLoss',\n 'Focal',\n 'LDAM',\n 'BSoftmax',\n 'CRT',\n 'ReWeightCRT',\n 'VanillaCRT',\n # flat minima optimizer\n 'MA',\n 'SAM',\n # attribute balancing\n 'GroupDROAttr',\n 'ReSampleAttr',\n 'ReWeightAttr',\n]\n D = self.my_cdist(x, y)\n K = torch.zeros_like(D)\n W = size[2]\n H = size[3]\ndef get_algorithm_class(algorithm_name):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _init_model(self):\n def _compute_loss(self, i, x, y, a, step):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def return_groups(self, y, a):\n def return_attributes(all_a):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _init_model(self):\n def _compute_loss(self, i, x, y, a, step):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None, group_def='group'):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def focal_loss(input_values, gamma):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, 
input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _irm_penalty(logits, y):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,\n grp_sizes=None, attr_sizes=None, gaussian=False):\n def my_cdist(x1, x2):\n def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100, 1000]):\n def mmd(self, x, y):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,\n grp_sizes=None, attr_sizes=None, conditional=False, class_balance=False):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def update(self, minibatch, step):\n def return_feats(self, x):\n def predict(self, x):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step, stage1_model):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _to_ohe(self, y):\n def _lisa_mixup_data(self, s, a, x, y, alpha):\n def _rand_bbox(size, lam):\n def _mix_up(alpha, x1, x2, y1, y2):\n def _cut_mix_up(self, alpha, x1, x2, y1, y2):\n def _compute_loss(self, i, x, y, a, step):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def _compute_loss(self, i, x, y, a, step):\n def update(self, minibatch, step):\n def predict(self, x):\n def update_ma(self):\n def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):\n def update(self, minibatch, step):\n def norm(tensor_list, p=2):\nclass Algorithm(torch.nn.Module):\nclass ERM(Algorithm):\nclass GroupDRO(ERM):\nclass GroupDROAttr(ERM):\nclass StratifiedERM(ERM):\nclass ReSample(ERM):\nclass ReSampleAttr(ERM):\nclass ReWeightBase(ERM):\nclass ReWeight(ReWeightBase):\nclass ReWeightAttr(ReWeightBase):\nclass SqrtReWeight(ReWeight):\nclass CBLoss(ReWeight):\nclass Focal(ERM):\nclass LDAM(ERM):\nclass BSoftmax(ERM):\nclass CRT(ERM):\nclass ReWeightCRT(ReWeight):\nclass VanillaCRT(ERM):\nclass DFR(ERM):\nclass IRM(ERM):\nclass Mixup(ERM):\nclass AbstractMMD(ERM):\nclass 
MMD(AbstractMMD):\nclass CORAL(AbstractMMD):\nclass AbstractDANN(Algorithm):\nclass DANN(AbstractDANN):\nclass CDANN(AbstractDANN):\nclass CVaRDRO(ERM):\nclass AbstractTwoStage(Algorithm):\nclass JTT_Stage2(ERM): \nclass JTT(AbstractTwoStage):\nclass LISA(ERM):\nclass MA(ERM):\nclass SAM(ERM):"
},
{
"identifier": "early_stopping",
"path": "learning/early_stopping.py",
"snippet": "class EarlyStopping:\n def __init__(self, patience=5, lower_is_better=True):\n def __call__(self, metric, step, state_dict, path):\ndef save_model(state_dict, path):"
},
{
"identifier": "swad_utils",
"path": "learning/swad_utils.py",
"snippet": "class AveragedModel(Module):\nclass SWADBase:\nclass LossValley(SWADBase):\n def __init__(self, model, device=None, avg_fn=None, rm_optimizer=False):\n def avg_fn(averaged_model_parameter, model_parameter, num_averaged):\n def forward(self, *args, **kwargs):\n def predict(self, *args, **kwargs):\n def network(self):\n def update_parameters(self, model, step=None, start_step=None, end_step=None):\n def clone(self):\n def update_and_evaluate(self, segment_swa, val_loss):\n def get_final_model(self):\n def __init__(self, n_converge, n_tolerance, tolerance_ratio, **kwargs):\n def get_smooth_loss(self, idx):\n def is_converged(self):\n def update_and_evaluate(self, segment_swa, val_loss):\n def get_final_model(self):\n Q = list(self.smooth_Q)[: converge_idx + 1]"
},
{
"identifier": "misc",
"path": "utils/misc.py",
"snippet": "def pickle_save(filename, obj):\ndef pickle_load(filename):\ndef mac_pickle_load(file_path):\ndef mac_pickle_dump(filename, obj):\ndef load_json(json_path):\ndef save_json(json_path, data):\n def default(self, obj):\n def format(cls, text, color='white'):\ndef log(text, color='white', style='normal', with_time=True, handle=None):\ndef print_yellow(text, with_time=True):\ndef print_cyan(text, with_time=True):\ndef print_green(text, with_time=True):\ndef prepare_folders(args):\ndef l2_between_dicts(dict_1, dict_2):\n def __init__(self, ema, oneminusema_correction=True):\n def update(self, dict_data):\ndef count_samples_per_class(targets, num_labels):\ndef make_balanced_weights_per_sample(targets):\ndef pdb():\ndef seed_hash(*args):\ndef print_separator():\ndef print_row(row, colwidth=10, latex=False):\n def format_val(x):\ndef safe_load(parsed):\n def __init__(self, underlying_dataset, keys):\n def __getitem__(self, key):\n def __len__(self):\ndef split_dataset(dataset, n, seed=0):\ndef random_pairs_of_minibatches(minibatches):\ndef mixup_data(x, y, alpha=1., device=\"cpu\"):\ndef accuracy(network, loader, device):\ndef adjust_learning_rate(optimizer, lr, step, total_steps, schedule, cos=False):\n def __init__(self, fname, mode=\"a\"):\n def write(self, message):\n def flush(self):\n def __init__(self, *args, **kwargs):\n def _prototype(self, other, op):\n def __add__(self, other):\n def __rmul__(self, other):\n def __neg__(self):\n def __rsub__(self, other):\n def __truediv__(self, other):\ndef make_grid(tensor, nrow=8, padding=2, normalize=False, ranges=None, scale_each=False, pad_value=0):\n def norm_ip(img, min, max):\n def norm_range(t, ranges):\ndef save_image(tensor, filename, nrow=8, padding=2, normalize=False, ranges=None, scale_each=False, pad_value=0):\nclass NumpyEncoder(json.JSONEncoder):\nclass TextFormat:\nclass MovingAverage:\nclass _SplitDataset(torch.utils.data.Dataset):\nclass Tee:\nclass ParamDict(OrderedDict):"
},
{
"identifier": "eval_helper",
"path": "utils/eval_helper.py",
"snippet": "def predict_on_set(algorithm, loader, device):\ndef eval_metrics(algorithm, loader, device, thress=[0.5], thress_suffix=['_50'], add_arrays=False):\ndef binary_metrics(targets, preds, label_set=[0, 1], suffix='', return_arrays=False):\ndef prob_metrics(targets, preds, label_set, return_arrays=False):\n CM = confusion_matrix(targets, preds, labels=label_set)\n CM = confusion_matrix(targets, preds, labels=label_set)"
},
{
"identifier": "InfiniteDataLoader",
"path": "dataset/fast_dataloader.py",
"snippet": "class InfiniteDataLoader:\n\n def __init__(self, dataset, weights, batch_size, num_workers):\n super().__init__()\n\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(\n weights, replacement=True, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=True)\n\n batch_sampler = torch.utils.data.BatchSampler(\n sampler,\n batch_size=batch_size,\n drop_last=True)\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n raise ValueError"
}
] | import argparse
import collections
import json
import os
import random
import sys
import time
import numpy as np
import pandas as pd
import PIL
import torch
import torchvision
import torch.utils.data
import pickle
import hparams_registry
import wandb
import hashlib
from tensorboard_logger import Logger
from pathlib import Path
from torch.utils.data import DataLoader
from dataset import datasets
from learning import algorithms, early_stopping, swad_utils
from utils import misc, eval_helper
from dataset.fast_dataloader import InfiniteDataLoader
from collections import OrderedDict | 7,521 | for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
hparams.update({
'image_arch': args.image_arch,
'data_augmentation': args.aug,
'task': args.task,
'attr': args.attr,
'group_def': args.group_def
})
if args.log_online:
wandb.init(project='subpop_fairness', config={**vars(args), **hparams},
name=f"train_{args.dataset}_{args.task}_{args.algorithm}_{args.attr}_"
f"{hashlib.md5(str({**vars(args), **hparams}).encode('utf-8')).hexdigest()[:8]}_"
f"{os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else ''}")
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
with open(os.path.join(output_dir, 'args.json'), 'w') as f:
json.dump(vars(args), f, indent=4)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.multiprocessing.set_sharing_strategy('file_system')
device = "cuda" if torch.cuda.is_available() else "cpu"
def make_combined_dataset(names, sset, group_def, override_attr=None):
ind_datasets = []
for ds in names:
ind_datasets.append(vars(datasets)[ds](args.data_dir, sset, hparams, group_def=group_def, override_attr=override_attr))
return datasets.ConcatImageDataset(ind_datasets)
if len(args.dataset) == 1:
if args.dataset[0] in vars(datasets):
train_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'tr', hparams, group_def=args.group_def)
val_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'va', hparams, group_def='group')
test_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'te', hparams, group_def='group')
else:
raise NotImplementedError
else:
train_dataset = make_combined_dataset(args.dataset, 'tr', args.group_def)
val_dataset = make_combined_dataset(args.dataset, 'va', 'group')
test_dataset = make_combined_dataset(args.dataset, 'te', 'group')
if args.algorithm == 'DFR':
train_datasets = []
for ds in args.dataset:
train_datasets.append(vars(datasets)[ds](
args.data_dir, 'va', hparams, group_def=args.group_def, subsample_type='group'))
train_dataset = datasets.ConcatImageDataset(train_datasets)
elif args.algorithm == 'StratifiedERM':
assert args.stratified_erm_subset is not None
train_dataset = datasets.SubsetImageDataset(
train_dataset, idxs=np.argwhere(np.array(train_dataset.a) == args.stratified_erm_subset).squeeze())
val_dataset = datasets.SubsetImageDataset(
val_dataset, idxs=np.argwhere(np.array(val_dataset.a) == args.stratified_erm_subset).squeeze())
test_dataset = datasets.SubsetImageDataset(
test_dataset, idxs=np.argwhere(np.array(test_dataset.a) == args.stratified_erm_subset).squeeze())
num_workers = train_dataset.N_WORKERS
input_shape = train_dataset.INPUT_SHAPE
num_labels = train_dataset.num_labels
num_attributes = train_dataset.num_attributes
data_type = train_dataset.data_type
n_steps = args.steps or train_dataset.N_STEPS
checkpoint_freq = args.checkpoint_freq or train_dataset.CHECKPOINT_FREQ
hparams.update({
"steps": n_steps
})
print(f"Dataset:\n\t[train]\t{len(train_dataset)}"
f"\n\t[val]\t{len(val_dataset)}")
if hparams['group_balanced']:
# if attribute not available, groups degenerate to classes
train_weights = np.asarray(train_dataset.weights_g)
train_weights /= np.sum(train_weights)
elif hparams['attr_balanced']:
train_weights = np.asarray(train_dataset.weights_a)
train_weights /= np.sum(train_weights)
else:
train_weights = None
train_loader = InfiniteDataLoader(
dataset=train_dataset,
weights=train_weights,
batch_size=min(len(train_dataset), hparams['batch_size']),
num_workers=num_workers
)
split_names = ['va', 'te']
eval_loaders = [DataLoader(
dataset=dset,
batch_size=max(128, hparams['batch_size'] * 2),
num_workers=num_workers,
shuffle=False)
for dset in [val_dataset, test_dataset]
]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(data_type, input_shape, num_labels, num_attributes, len(train_dataset), hparams,
grp_sizes=train_dataset.group_sizes, attr_sizes=train_dataset.attr_sizes)
es_group = args.es_metric.split(':')[0]
es_metric = args.es_metric.split(':')[1]
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Shortcut Learning in Chest X-rays')
# training
parser.add_argument('--store_name', type=str, default='debug')
parser.add_argument('--dataset', type=str, default=["MIMIC"], nargs='+')
parser.add_argument('--task', type=str, default="No Finding", choices=datasets.TASKS + datasets.ATTRS)
parser.add_argument('--attr', type=str, default="sex", choices=datasets.ATTRS)
parser.add_argument('--group_def', type=str, default="group", choices=['group', 'label'])
parser.add_argument('--algorithm', type=str, default="ERM", choices=algorithms.ALGORITHMS)
# others
parser.add_argument('--output_dir', type=str, default='output')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--hparams', type=str, help='JSON-serialized hparams dict')
parser.add_argument('--hparams_seed', type=int, default=0, help='Seed for random hparams (0 for "default hparams")')
parser.add_argument('--seed', type=int, default=0, help='Seed for everything else')
parser.add_argument('--steps', type=int, default=None)
parser.add_argument('--log_online', help='Log online using wandb', action='store_true')
parser.add_argument('--skip_ood_eval', help='skip evals on OOD datasets', action='store_true')
parser.add_argument('--log_all', help='Log all val metrics at each step to tb and wandb', action='store_true')
parser.add_argument('--stratified_erm_subset', type=int, default=None)
# two-stage related
parser.add_argument('--stage1_folder', type=str)
# early stopping
parser.add_argument('--use_es', action='store_true')
parser.add_argument('--es_strategy', choices=['metric'], default='metric')
parser.add_argument('--es_metric', type=str, default='min_group:accuracy')
parser.add_argument('--es_patience', type=int, default=5, help='Stop after this many checkpoints w/ no improvement')
# checkpoints
parser.add_argument('--resume', '-r', type=str, default='')
parser.add_argument('--checkpoint_freq', type=int, default=None, help='Checkpoint every N steps')
parser.add_argument('--skip_model_save', action='store_true')
parser.add_argument('--debug', action='store_true')
# architectures and pre-training sources
parser.add_argument('--image_arch', default='densenet_sup_in1k',
choices=['densenet_sup_in1k', 'resnet_sup_in1k', 'resnet_sup_in21k', 'resnet_simclr_in1k',
'resnet_barlow_in1k', 'vit_sup_in1k', 'vit_sup_in21k', 'vit_sup_swag', 'vit_clip_oai',
'vit_clip_laion', 'vit_dino_in1k', 'resnet_dino_in1k'])
# data augmentations
parser.add_argument('--aug', default='basic2',
choices=['none', 'basic', 'basic2', 'auto_aug', 'rand_aug', 'trivial_aug', 'augmix'])
args = parser.parse_args()
start_step = 0
misc.prepare_folders(args)
output_dir = os.path.join(args.output_dir, args.store_name)
if not args.debug:
sys.stdout = misc.Tee(os.path.join(output_dir, 'out.txt'))
sys.stderr = misc.Tee(os.path.join(output_dir, 'err.txt'))
tb_logger = Logger(logdir=output_dir, flush_secs=2)
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
hparams.update({
'image_arch': args.image_arch,
'data_augmentation': args.aug,
'task': args.task,
'attr': args.attr,
'group_def': args.group_def
})
if args.log_online:
wandb.init(project='subpop_fairness', config={**vars(args), **hparams},
name=f"train_{args.dataset}_{args.task}_{args.algorithm}_{args.attr}_"
f"{hashlib.md5(str({**vars(args), **hparams}).encode('utf-8')).hexdigest()[:8]}_"
f"{os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else ''}")
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
with open(os.path.join(output_dir, 'args.json'), 'w') as f:
json.dump(vars(args), f, indent=4)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.multiprocessing.set_sharing_strategy('file_system')
device = "cuda" if torch.cuda.is_available() else "cpu"
def make_combined_dataset(names, sset, group_def, override_attr=None):
ind_datasets = []
for ds in names:
ind_datasets.append(vars(datasets)[ds](args.data_dir, sset, hparams, group_def=group_def, override_attr=override_attr))
return datasets.ConcatImageDataset(ind_datasets)
if len(args.dataset) == 1:
if args.dataset[0] in vars(datasets):
train_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'tr', hparams, group_def=args.group_def)
val_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'va', hparams, group_def='group')
test_dataset = vars(datasets)[args.dataset[0]](args.data_dir, 'te', hparams, group_def='group')
else:
raise NotImplementedError
else:
train_dataset = make_combined_dataset(args.dataset, 'tr', args.group_def)
val_dataset = make_combined_dataset(args.dataset, 'va', 'group')
test_dataset = make_combined_dataset(args.dataset, 'te', 'group')
if args.algorithm == 'DFR':
train_datasets = []
for ds in args.dataset:
train_datasets.append(vars(datasets)[ds](
args.data_dir, 'va', hparams, group_def=args.group_def, subsample_type='group'))
train_dataset = datasets.ConcatImageDataset(train_datasets)
elif args.algorithm == 'StratifiedERM':
assert args.stratified_erm_subset is not None
train_dataset = datasets.SubsetImageDataset(
train_dataset, idxs=np.argwhere(np.array(train_dataset.a) == args.stratified_erm_subset).squeeze())
val_dataset = datasets.SubsetImageDataset(
val_dataset, idxs=np.argwhere(np.array(val_dataset.a) == args.stratified_erm_subset).squeeze())
test_dataset = datasets.SubsetImageDataset(
test_dataset, idxs=np.argwhere(np.array(test_dataset.a) == args.stratified_erm_subset).squeeze())
num_workers = train_dataset.N_WORKERS
input_shape = train_dataset.INPUT_SHAPE
num_labels = train_dataset.num_labels
num_attributes = train_dataset.num_attributes
data_type = train_dataset.data_type
n_steps = args.steps or train_dataset.N_STEPS
checkpoint_freq = args.checkpoint_freq or train_dataset.CHECKPOINT_FREQ
hparams.update({
"steps": n_steps
})
print(f"Dataset:\n\t[train]\t{len(train_dataset)}"
f"\n\t[val]\t{len(val_dataset)}")
if hparams['group_balanced']:
# if attribute not available, groups degenerate to classes
train_weights = np.asarray(train_dataset.weights_g)
train_weights /= np.sum(train_weights)
elif hparams['attr_balanced']:
train_weights = np.asarray(train_dataset.weights_a)
train_weights /= np.sum(train_weights)
else:
train_weights = None
train_loader = InfiniteDataLoader(
dataset=train_dataset,
weights=train_weights,
batch_size=min(len(train_dataset), hparams['batch_size']),
num_workers=num_workers
)
split_names = ['va', 'te']
eval_loaders = [DataLoader(
dataset=dset,
batch_size=max(128, hparams['batch_size'] * 2),
num_workers=num_workers,
shuffle=False)
for dset in [val_dataset, test_dataset]
]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(data_type, input_shape, num_labels, num_attributes, len(train_dataset), hparams,
grp_sizes=train_dataset.group_sizes, attr_sizes=train_dataset.attr_sizes)
es_group = args.es_metric.split(':')[0]
es_metric = args.es_metric.split(':')[1] | es = early_stopping.EarlyStopping( | 2 | 2023-12-15 04:10:31+00:00 | 12k |
RomGai/BrainVis | cascade_diffusion.py | [
{
"identifier": "PLMSSampler",
"path": "dc_ldm/models/diffusion/plms.py",
"snippet": "class PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps, t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t"
},
{
"identifier": "instantiate_from_config",
"path": "dc_ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "Dataset",
"path": "dataset.py",
"snippet": "class Dataset(Data.Dataset):\n def __init__(self, device, mode, data, wave_len):\n self.device = device\n self.datas, self.label ,self.clip,self.clip_moreinf = data\n self.mode = mode\n self.wave_len = wave_len\n self.__padding__()\n\n def __padding__(self):\n origin_len = self.datas[0].shape[0]\n if origin_len % self.wave_len:\n padding_len = self.wave_len - (origin_len % self.wave_len)\n padding = np.zeros((len(self.datas), padding_len, self.datas[0].shape[1]), dtype=np.float32)\n self.datas = np.concatenate([self.datas, padding], axis=-2)\n\n def __len__(self):\n return len(self.datas)\n\n def __getitem__(self, item):\n data = torch.tensor(self.datas[item]).to(self.device)\n label = self.label[item]\n clip=torch.tensor(self.clip[item]).to(self.device)\n clip_moreinf = torch.tensor(self.clip_moreinf[item]).to(self.device)\n\n return data, torch.tensor(label).to(self.device), clip,clip_moreinf\n\n def shape(self):\n return self.datas[0].shape"
},
{
"identifier": "TimeEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class TimeEncoder(nn.Module):\n def __init__(self, args):\n super(TimeEncoder, self).__init__()\n d_model = args.d_model\n self.d=d_model\n self.momentum = args.momentum\n self.linear_proba = True\n self.nocliptune=True\n self.device = args.device\n self.data_shape = args.data_shape\n self.max_len = int(self.data_shape[0] / args.wave_length)\n print(self.max_len)\n self.mask_len = int(args.mask_ratio * self.max_len)\n self.position = PositionalEmbedding(self.max_len, d_model)\n self.mask_token = nn.Parameter(torch.randn(d_model, ))\n self.input_projection = nn.Conv1d(args.data_shape[1], d_model, kernel_size=args.wave_length,\n stride=args.wave_length)\n self.encoder = TransformerEncoder(args)\n self.momentum_encoder = TransformerEncoder(args)\n self.tokenizer = Tokenizer(d_model, args.vocab_size)\n self.reg = Regressor(d_model, args.attn_heads, 4 * d_model, 1, args.reg_layers)\n self.predict_head = nn.Linear(d_model, args.num_class)\n self.channelmapping=ChannelMapping(self.max_len,77)\n self.dimmapping = nn.Linear(d_model, 768)\n self.apply(self._init_weights)\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n xavier_normal_(module.weight.data)\n if module.bias is not None:\n constant_(module.bias.data, 0.1)\n\n def copy_weight(self):\n with torch.no_grad():\n for (param_a, param_b) in zip(self.encoder.parameters(), self.momentum_encoder.parameters()):\n param_b.data = param_a.data\n\n def momentum_update(self):\n with torch.no_grad():\n for (param_a, param_b) in zip(self.encoder.parameters(), self.momentum_encoder.parameters()):\n param_b.data = self.momentum * param_b.data + (1 - self.momentum) * param_a.data\n\n def pretrain_forward(self, x):\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n tokens = self.tokenizer(x)\n\n x += self.position(x)\n\n rep_mask_token = self.mask_token.repeat(x.shape[0], x.shape[1], 1) + self.position(x)\n\n index = np.arange(x.shape[1])\n random.shuffle(index)\n v_index = index[:-self.mask_len]\n m_index = index[-self.mask_len:]\n visible = x[:, v_index, :]\n mask = x[:, m_index, :]\n tokens = tokens[:, m_index]\n\n rep_mask_token = rep_mask_token[:, m_index, :]\n\n rep_visible = self.encoder(visible)\n with torch.no_grad():\n rep_mask = self.momentum_encoder(mask)\n\n rep_mask_prediction = self.reg(rep_visible, rep_mask_token)\n token_prediction_prob = self.tokenizer.center(rep_mask_prediction)\n\n return [rep_mask, rep_mask_prediction], [token_prediction_prob, tokens]\n\n def forward(self, x):\n if self.linear_proba==True and self.nocliptune==True:\n #with torch.no_grad():\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n x += self.position(x)\n x = self.encoder(x)\n return torch.mean(x, dim=1)\n\n if self.linear_proba==False and self.nocliptune==True:\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n x += self.position(x)\n x = self.encoder(x)\n #lastrep=torch.mean(x, dim=1)\n lastrep=x\n xcls=self.predict_head(torch.mean(x, dim=1))\n return lastrep, torch.mean(x, dim=1), xcls\n\n if self.nocliptune == False: #CLIP\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n x += self.position(x)\n x = self.encoder(x)\n lastrep=torch.mean(x, dim=1)\n x=self.channelmapping(x)\n x = self.dimmapping(x)\n\n return lastrep#,x\n\n def get_tokens(self, x):\n x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()\n tokens = self.tokenizer(x)\n return tokens"
},
{
"identifier": "AlignNet",
"path": "model/BrainVisModels.py",
"snippet": "class AlignNet(nn.Module):\n def __init__(self, input_size, freq_size, output_size,pretrained_model):\n super(AlignNet, self).__init__()\n\n self.pretrained_model = pretrained_model#TimeFreqEncoder\n\n self.fc01=nn.Linear(input_size+freq_size+40, 4*input_size)\n self.tanh = nn.Tanh()\n self.fc02 = nn.Linear(4*input_size, input_size)\n self.tanh = nn.Tanh()\n self.fc03=nn.Linear(input_size, 4*input_size)\n self.tanh = nn.Tanh()\n self.fc04 = nn.Linear(4*input_size, input_size)\n self.tanh = nn.Tanh()\n self.fc05=nn.Linear(input_size, 4*input_size)\n self.tanh = nn.Tanh()\n self.fc6 = nn.Linear(4*input_size, output_size)\n\n def forward(self, x):\n lastrep,encoded,scores=self.pretrained_model(x)\n x = torch.cat((encoded, scores), dim=1)\n x = self.fc01(x)\n x = self.tanh(x)\n res_4is_1=x\n x = self.fc02(x)\n x = self.tanh(x)\n res_is_2 = x\n x = self.fc03(x)+res_4is_1\n x = self.tanh(x)\n res_4is_2 = x\n x = self.fc04(x)+res_is_2\n x = self.tanh(x)\n x = self.fc05(x)+res_4is_2\n x = self.tanh(x)\n x = self.fc6(x)\n return x"
},
{
"identifier": "TimeFreqEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class TimeFreqEncoder(nn.Module):\n def __init__(self, pretrained_model_time,pretrained_model_freq,args):\n super(TimeFreqEncoder, self).__init__()\n\n self.pretrained_model_time = pretrained_model_time\n self.pretrained_model_time.nocliptune=True\n self.pretrained_model_time.linear_proba=False\n self.pretrained_model_freq=pretrained_model_freq\n\n self.fc01 =nn.Linear( args.d_model+128, args.num_class)\n\n def forward(self,x):\n lastrep,time_feature,cls=self.pretrained_model_time(x)\n lstmcls,freq_feature=self.pretrained_model_freq(x)\n x = torch.cat((time_feature, freq_feature), dim=1)\n\n lastrep = x\n encoded=x\n x = self.fc01(encoded)\n\n scores=x\n return lastrep,encoded,scores"
},
{
"identifier": "FreqEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class FreqEncoder(nn.Module):\n\n def __init__(self, input_size=128, lstm_size=128, lstm_layers=1, output_size=128):\n # Call parent\n super().__init__()\n # Define parameters\n self.input_size = input_size\n self.lstm_size = lstm_size\n self.lstm_layers = lstm_layers\n self.output_size = output_size\n\n # Define internal modules\n self.lstm = nn.LSTM(input_size, lstm_size, num_layers=lstm_layers, batch_first=True)\n self.output = nn.Linear(lstm_size, output_size)\n self.classifier = nn.Linear(output_size, 40)\n\n def forward(self, x):\n batch_size = x.size(0)\n x = x.permute(0, 2, 1)\n x = x.cpu()\n fourier_transform = np.fft.fft(x, axis=2)\n half_spectrum = fourier_transform[:, :, 1:440 // 2 + 1]\n amplitude_spectrum = np.abs(half_spectrum)\n\n amplitude_spectrum = torch.tensor(amplitude_spectrum).float()\n\n x = amplitude_spectrum.permute(0, 2, 1)\n x = x.to(\"cuda\")\n\n lstm_init = (torch.zeros(self.lstm_layers, batch_size, self.lstm_size),\n torch.zeros(self.lstm_layers, batch_size, self.lstm_size))\n if x.is_cuda: lstm_init = (lstm_init[0].cuda(), lstm_init[0].cuda())\n lstm_init = (Variable(lstm_init[0], volatile=x.volatile), Variable(lstm_init[1], volatile=x.volatile))\n\n x = self.lstm(x, lstm_init)[0][:, -1, :]\n reps = x\n # Forward output\n xa = F.relu(self.output(x))\n x = self.classifier(xa)\n return x, xa"
},
{
"identifier": "args",
"path": "args.py",
"snippet": ""
}
] | import torch
import os
import numpy as np
import torchvision.transforms as transforms
import argparse
from omegaconf import OmegaConf
from dc_ldm.models.diffusion.plms import PLMSSampler
from einops import rearrange, repeat
from dc_ldm.util import instantiate_from_config
from torch.utils.data import Dataset, DataLoader
from dataset import Dataset as selfdataset
from model.BrainVisModels import TimeEncoder, AlignNet,TimeFreqEncoder,FreqEncoder
from args import args, Test_data, Train_data_all, Train_data, Train_data_all_with_image_name, Train_data_with_image_name, Test_data_with_image_name
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image
| 7,533 | '[32]': 'n07753592',
'[19]': 'n07873807',
'[9]': 'n11939491',
'[33]': 'n13054560'
}
parser = argparse.ArgumentParser(description="Template")
parser.add_argument('-mp','--model_params', default='', nargs='*', help='list of key=value pairs of model options')
opt = parser.parse_args()
# Path
datapath='data/EEG_Feature_Label/'
img_file_type='.JPEG'
device = "cuda"
test_img_names_file=datapath+'test_image_names.pth'
test_seq_file=datapath+'test_seqs.pth'
dff_model_path = "pretrained_model/v1-5-pruned-emaonly.ckpt"
dff_yaml_path = "pretrained_model/config15.yaml"
test_pred_file=datapath+'test_pred.pth'
output_path="picture"
logger=None
ddim_steps=40
global_pool=True
use_time_cond=False
clip_tune=False
cls_tune=False
def normalize(img):
if img.shape[-1] == 3:
img = rearrange(img, 'h w c -> c h w')
img = torch.tensor(img)
img = img * 2.0 - 1.0 # to -1 ~ 1
return img
def channel_last(img):
if img.shape[-1] == 3:
return img
return rearrange(img, 'c h w -> h w c')
class Dataset(Dataset):
def __init__(self, img_names_file,seq_file,labels_file):
self.image_names = torch.load(img_names_file)
self.seqs = torch.load(seq_file)
self.labels = torch.load(labels_file)
def __len__(self):
return len(self.seqs)
def __getitem__(self, idx):
input_vec=torch.tensor(self.seqs[idx]).to("cuda")
img_label = self.image_names[idx].split("_")[0]
img_path = "data/image/" + img_label + "/" + self.image_names[idx] + img_file_type
image = Image.open(img_path).convert('RGB')
img_transform_test = transforms.Compose([
normalize,
transforms.Resize((512, 512)),
channel_last
])
gt_image = np.array(image) / 255.0
gt_image = img_transform_test(gt_image)
prompt = propmt_dict[lable_number_dict[str(self.labels[idx])]]
return input_vec, gt_image,self.image_names[idx],prompt
#Load data
batch_size = 1
test_dataset = Dataset(test_img_names_file,test_seq_file, test_pred_file)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
train_dataset = selfdataset(device=args.device, mode='pretrain', data=Train_data_all, wave_len=args.wave_length)
args.data_shape = train_dataset.shape()
#Load AlignNet
time_model=TimeEncoder(args)
time_model=time_model.to("cuda")
freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value) for
(key, value) in [x.split("=") for x in opt.model_params]}
freq_model = FreqEncoder(**freq_model_options)
timefreq_model = TimeFreqEncoder(time_model, freq_model, args)
timefreq_model=timefreq_model.to("cuda")
time_size=128
freq_size=128
clip_size=int(77*768)
model_eegtoclip=AlignNet(time_size,freq_size,clip_size,timefreq_model)
eegtoclip_state_dict = torch.load('exp/epilepsy/test/clipfinetune_model.pkl', map_location="cuda")#device)
model_eegtoclip.load_state_dict(eegtoclip_state_dict)
model_eegtoclip.to("cuda")
model_eegtoclip.eval()
#Load stable diffusion
ckp_path = os.path.join(dff_model_path)
config_path = os.path.join(dff_yaml_path)
config = OmegaConf.load(config_path)
config.model.params.unet_config.params.use_time_cond = use_time_cond
config.model.params.unet_config.params.global_pool = global_pool
cond_dim = config.model.params.unet_config.params.context_dim
model = instantiate_from_config(config.model)
pl_sd = torch.load(ckp_path, map_location=device)['state_dict']
m, u = model.load_state_dict(pl_sd, strict=False)
model.cond_stage_trainable = False
model.ddim_steps = ddim_steps
model.re_init_ema()
model.p_channels = config.model.params.channels
model.p_image_size = config.model.params.image_size
model.ch_mult = config.model.params.first_stage_config.params.ddconfig.ch_mult
model.clip_tune = clip_tune
model.cls_tune = cls_tune
model = model.to(device)
|
propmt_dict = {'n02106662': 'german shepherd dog',
'n02124075': 'cat ',
'n02281787': 'lycaenid butterfly',
'n02389026': 'sorrel horse',
'n02492035': 'Cebus capucinus',
'n02504458': 'African elephant',
'n02510455': 'panda',
'n02607072': 'anemone fish',
'n02690373': 'airliner',
'n02906734': 'broom',
'n02951358': 'canoe or kayak',
'n02992529': 'cellular telephone',
'n03063599': 'coffee mug',
'n03100240': 'old convertible',
'n03180011': 'desktop computer',
'n03197337': 'digital watch',
'n03272010': 'electric guitar',
'n03272562': 'electric locomotive',
'n03297495': 'espresso maker',
'n03376595': 'folding chair',
'n03445777': 'golf ball',
'n03452741': 'grand piano',
'n03584829': 'smoothing iron',
'n03590841': 'Orange jack-o’-lantern',
'n03709823': 'mailbag',
'n03773504': 'missile',
'n03775071': 'mitten,glove',
'n03792782': 'mountain bike, all-terrain bike',
'n03792972': 'mountain tent',
'n03877472': 'pajama',
'n03888257': 'parachute',
'n03982430': 'pool table, billiard table, snooker table ',
'n04044716': 'radio telescope',
'n04069434': 'eflex camera',
'n04086273': 'revolver, six-shooter',
'n04120489': 'running shoe',
'n07753592': 'banana',
'n07873807': 'pizza',
'n11939491': 'daisy',
'n13054560': 'bolete'
}
lable_number_dict={
'[12]': 'n02106662',
'[39]': 'n02124075',
'[11]': 'n02281787',
'[0]': 'n02389026',
'[21]': 'n02492035',
'[35]': 'n02504458',
'[8]': 'n02510455',
'[3]': 'n02607072',
'[36]': 'n02690373',
'[18]': 'n02906734',
'[10]': 'n02951358',
'[15]': 'n02992529',
'[5]': 'n03063599',
'[24]': 'n03100240',
'[17]': 'n03180011',
'[34]': 'n03197337',
'[28]': 'n03272010',
'[37]': 'n03272562',
'[4]': 'n03297495',
'[25]': 'n03376595',
'[16]': 'n03445777',
'[30]': 'n03452741',
'[2]': 'n03584829',
'[14]': 'n03590841',
'[23]': 'n03709823',
'[20]': 'n03773504',
'[27]': 'n03775071',
'[6]': 'n03792782',
'[31]': 'n03792972',
'[26]': 'n03877472',
'[1]': 'n03888257',
'[22]': 'n03982430',
'[38]': 'n04044716',
'[29]': 'n04069434',
'[7]': 'n04086273',
'[13]': 'n04120489',
'[32]': 'n07753592',
'[19]': 'n07873807',
'[9]': 'n11939491',
'[33]': 'n13054560'
}
parser = argparse.ArgumentParser(description="Template")
parser.add_argument('-mp','--model_params', default='', nargs='*', help='list of key=value pairs of model options')
opt = parser.parse_args()
# Path
datapath='data/EEG_Feature_Label/'
img_file_type='.JPEG'
device = "cuda"
test_img_names_file=datapath+'test_image_names.pth'
test_seq_file=datapath+'test_seqs.pth'
dff_model_path = "pretrained_model/v1-5-pruned-emaonly.ckpt"
dff_yaml_path = "pretrained_model/config15.yaml"
test_pred_file=datapath+'test_pred.pth'
output_path="picture"
logger=None
ddim_steps=40
global_pool=True
use_time_cond=False
clip_tune=False
cls_tune=False
def normalize(img):
if img.shape[-1] == 3:
img = rearrange(img, 'h w c -> c h w')
img = torch.tensor(img)
img = img * 2.0 - 1.0 # to -1 ~ 1
return img
def channel_last(img):
if img.shape[-1] == 3:
return img
return rearrange(img, 'c h w -> h w c')
class Dataset(Dataset):
def __init__(self, img_names_file,seq_file,labels_file):
self.image_names = torch.load(img_names_file)
self.seqs = torch.load(seq_file)
self.labels = torch.load(labels_file)
def __len__(self):
return len(self.seqs)
def __getitem__(self, idx):
input_vec=torch.tensor(self.seqs[idx]).to("cuda")
img_label = self.image_names[idx].split("_")[0]
img_path = "data/image/" + img_label + "/" + self.image_names[idx] + img_file_type
image = Image.open(img_path).convert('RGB')
img_transform_test = transforms.Compose([
normalize,
transforms.Resize((512, 512)),
channel_last
])
gt_image = np.array(image) / 255.0
gt_image = img_transform_test(gt_image)
prompt = propmt_dict[lable_number_dict[str(self.labels[idx])]]
return input_vec, gt_image,self.image_names[idx],prompt
#Load data
batch_size = 1
test_dataset = Dataset(test_img_names_file,test_seq_file, test_pred_file)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
train_dataset = selfdataset(device=args.device, mode='pretrain', data=Train_data_all, wave_len=args.wave_length)
args.data_shape = train_dataset.shape()
#Load AlignNet
time_model=TimeEncoder(args)
time_model=time_model.to("cuda")
freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value) for
(key, value) in [x.split("=") for x in opt.model_params]}
freq_model = FreqEncoder(**freq_model_options)
timefreq_model = TimeFreqEncoder(time_model, freq_model, args)
timefreq_model=timefreq_model.to("cuda")
time_size=128
freq_size=128
clip_size=int(77*768)
model_eegtoclip=AlignNet(time_size,freq_size,clip_size,timefreq_model)
eegtoclip_state_dict = torch.load('exp/epilepsy/test/clipfinetune_model.pkl', map_location="cuda")#device)
model_eegtoclip.load_state_dict(eegtoclip_state_dict)
model_eegtoclip.to("cuda")
model_eegtoclip.eval()
#Load stable diffusion
ckp_path = os.path.join(dff_model_path)
config_path = os.path.join(dff_yaml_path)
config = OmegaConf.load(config_path)
config.model.params.unet_config.params.use_time_cond = use_time_cond
config.model.params.unet_config.params.global_pool = global_pool
cond_dim = config.model.params.unet_config.params.context_dim
model = instantiate_from_config(config.model)
pl_sd = torch.load(ckp_path, map_location=device)['state_dict']
m, u = model.load_state_dict(pl_sd, strict=False)
model.cond_stage_trainable = False
model.ddim_steps = ddim_steps
model.re_init_ema()
model.p_channels = config.model.params.channels
model.p_image_size = config.model.params.image_size
model.ch_mult = config.model.params.first_stage_config.params.ddconfig.ch_mult
model.clip_tune = clip_tune
model.cls_tune = cls_tune
model = model.to(device)
| sampler = PLMSSampler(model)
| 0 | 2023-12-16 12:52:14+00:00 | 12k |
tonnetonne814/PL-Bert-VITS2 | train_ms.py | [
{
"identifier": "DistributedBucketSampler",
"path": "data_utils.py",
"snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n i=0\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size"
},
{
"identifier": "TextAudioSpeakerCollate",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n # sid = 1\n max_bert_len = max([x[4].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n bert_lengths = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n bert_padded = torch.FloatTensor(len(batch), 13, max_bert_len, 768)\n\n text_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n bert = row[4]\n bert_padded[i, :, :bert.size(1),:] = bert\n bert_lengths[i] = bert.size(1)\n\n\n if self.return_ids:\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n ids_sorted_decreasing,\n )\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n )"
},
{
"identifier": "TextAudioSpeakerLoader",
"path": "data_utils.py",
"snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.hparams = hparams\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 999)\n self.min_audio_len = getattr(hparams, \"min_audio_len\", 8192)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n self.count = 0\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n for data in self.audiopaths_sid_text:\n audiopath, sid, ph, text, bert, emo, style = data\n if not os.path.isfile(audiopath):\n continue\n if self.min_text_len <= len(text) and len(text) <= self.max_text_len:\n audiopaths_sid_text_new.append([audiopath, sid, ph, text, bert, emo, style])\n length = os.path.getsize(audiopath) // (2 * self.hop_length)\n if length < self.min_audio_len // self.hop_length:\n print(\"DATA PASS\")\n continue\n lengths.append(length)\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n print(f\"INFO:{len(self.audiopaths_sid_text)} is used as Training Dataset.\")\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, ph, text, pl_bert, emo, style = (\n audiopath_sid_text[0],\n audiopath_sid_text[1],\n audiopath_sid_text[2],\n audiopath_sid_text[3],\n audiopath_sid_text[4],\n audiopath_sid_text[5],\n audiopath_sid_text[6],\n )\n ph = self.get_text(ph)\n spec, wav = self.get_audio(audiopath)\n bert = self.get_pl_bert(pl_bert)\n sid = self.get_sid(sid)\n\n # parameter checker \n assert len(ph) == bert.size(1)\n\n return (ph, spec, wav, sid, bert)\n \n def get_pl_bert(self, filename):\n path = os.path.join(\"pl_bert_embeddings\", f\"{filename}.PlBertJa\")\n data = torch.load(path)\n if self.add_blank:\n L, T, H = data.shape\n new_data = torch.zeros(size=(L,2*T+1,H), dtype=data.dtype)\n for idx in range(T):\n target_idx = idx*2+1\n new_data[:, target_idx, :] = data[:, idx, :]\n data = new_data\n return data\n\n def get_audio(self, filename):\n # TODO : if linear spec exists convert to mel from existing linear spec\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate\n )\n )\n # audio_norm = audio / self.max_wav_value\n audio_norm = audio.unsqueeze(0)\n spec_filename = 
filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n \"\"\"TODO : (need verification)\n if linear spec exists convert to\n mel from existing linear spec (uncomment below lines)\"\"\"\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", \".spec.pt\")),\n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text):\n if self.cleaned_text:\n text_norm = cleaned_text_to_sequence(text)\n else:\n text_norm = text_to_sequence(text, self.text_cleaners)\n if self.add_blank:\n text_norm = commons.intersperse(text_norm, 0)\n text_norm = torch.LongTensor(text_norm)\n return text_norm\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)"
},
{
"identifier": "discriminator_loss",
"path": "losses.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "feature_loss",
"path": "losses.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2"
},
{
"identifier": "generator_loss",
"path": "losses.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "kl_loss",
"path": "losses.py",
"snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l"
},
{
"identifier": "mel_spectrogram_torch",
"path": "mel_processing.py",
"snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n if version.parse(torch.__version__) >= version.parse(\"2\"):\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n else:\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "spec_to_mel_torch",
"path": "mel_processing.py",
"snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec"
},
{
"identifier": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES",
"path": "models.py",
"snippet": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [\n \"dur_disc_1\",\n \"dur_disc_2\",\n]"
},
{
"identifier": "AVAILABLE_FLOW_TYPES",
"path": "models.py",
"snippet": "AVAILABLE_FLOW_TYPES = [\n \"pre_conv\",\n \"pre_conv2\",\n \"fft\",\n \"mono_layer_inter_residual\",\n \"mono_layer_post_residual\",\n]"
},
{
"identifier": "DurationDiscriminatorV1",
"path": "models.py",
"snippet": "class DurationDiscriminatorV1(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_1(x)\n # x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_2(x)\n # x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_1(x)\n # x = self.drop(x)\n x = self.conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_2(x)\n # x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs"
},
{
"identifier": "DurationDiscriminatorV2",
"path": "models.py",
"snippet": "class DurationDiscriminatorV2(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append([output_prob])\n\n return output_probs"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11, 17, 23, 37]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "SynthesizerTrn",
"path": "models.py",
"snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n bert_emb_size,\n n_speakers=0,\n gin_channels=0,\n use_sdp=True,\n **kwargs,\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", False\n )\n self.use_transformer_flows = kwargs.get(\"use_transformer_flows\", False)\n self.transformer_flow_type = kwargs.get(\n \"transformer_flow_type\", \"mono_layer_post_residual\"\n )\n if self.use_transformer_flows:\n assert (\n self.transformer_flow_type in AVAILABLE_FLOW_TYPES\n ), f\"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}\"\n self.use_sdp = use_sdp\n # self.use_duration_discriminator = kwargs.get(\"use_duration_discriminator\", False)\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n else:\n self.enc_gin_channels = 0\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n bert_emb_size=bert_emb_size,\n gin_channels=self.enc_gin_channels,\n )\n\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)\n self.flow = ResidualCouplingTransformersBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 4,\n gin_channels=gin_channels,\n use_transformer_flows=self.use_transformer_flows,\n transformer_flow_type=self.transformer_flow_type,\n )\n\n if use_sdp:\n self.dp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n else:\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n\n # 重み付け加算式を取る\n self.WSL = WeightSumLayer(n_layers=13)\n\n def forward(self, x, x_lengths, y, y_lengths, bert, bert_lengths, sid=None):\n bert = self.WSL(bert)\n\n if 
self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n if self.use_sdp:\n l_length = self.dp(x, x_mask, w, g=g)\n l_length = l_length / torch.sum(x_mask)\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n logw_ = torch.log(w + 1e-6) * x_mask\n else:\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n bert,\n bert_lengths,\n sid=None,\n noise_scale=1,\n length_scale=1,\n noise_scale_w=1.0,\n max_len=None,\n ):\n bert = self.WSL(bert)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n if self.use_sdp:\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)\n else:\n logw = self.dp(x, x_mask, g=g)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n # currently vits-2 is not capable of voice conversion\n ## comment - choihkk\n ## Assuming the use of the ResidualCouplingTransformersLayer2 module, it seems that voice conversion is possible \n def voice_conversion(self, y, 
y_lengths, sid_src, sid_tgt):\n assert self.n_speakers > 0, \"n_speakers have to be larger than 0.\"\n g_src = self.emb_g(sid_src).unsqueeze(-1)\n g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)\n z_p = self.flow(z, y_mask, g=g_src)\n z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)\n o_hat = self.dec(z_hat * y_mask, g=g_tgt)\n return o_hat, y_mask, (z, z_p, z_hat)"
},
{
"identifier": "symbols",
"path": "PL_BERT_ja/text/symbols.py",
"snippet": ""
}
] | import argparse
import itertools
import json
import math
import os
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
import commons
import models
import utils
from torch import nn, optim
from torch.cuda.amp import GradScaler, autocast
from torch.nn import functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data_utils import (DistributedBucketSampler, TextAudioSpeakerCollate,
TextAudioSpeakerLoader)
from losses import discriminator_loss, feature_loss, generator_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from models import (AVAILABLE_DURATION_DISCRIMINATOR_TYPES,
AVAILABLE_FLOW_TYPES,
DurationDiscriminatorV1, DurationDiscriminatorV2,
MultiPeriodDiscriminator, SynthesizerTrn)
from PL_BERT_ja.text.symbols import symbols | 10,111 |
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
# from tensorboardX import SummaryWriter
torch.backends.cudnn.benchmark = True
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6060"
hps = utils.get_hparams()
mp.spawn(
run,
nprocs=n_gpus,
args=(
n_gpus,
hps,
),
)
def run(rank, n_gpus, hps):
net_dur_disc = None
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(
backend="nccl", init_method="env://", world_size=n_gpus, rank=rank
)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
if (
"use_mel_posterior_encoder" in hps.model.keys()
and hps.model.use_mel_posterior_encoder == True
):
print("Using mel posterior encoder for VITS2")
posterior_channels = 128 # vits2
hps.data.use_mel_posterior_encoder = True
else:
print("Using lin posterior encoder for VITS1")
posterior_channels = hps.data.filter_length // 2 + 1
hps.data.use_mel_posterior_encoder = False
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 500, 700, 900, 1100, 1300, 1500, 3000],
num_replicas=n_gpus,
rank=rank,
shuffle=True,
)
|
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
# from tensorboardX import SummaryWriter
torch.backends.cudnn.benchmark = True
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6060"
hps = utils.get_hparams()
mp.spawn(
run,
nprocs=n_gpus,
args=(
n_gpus,
hps,
),
)
def run(rank, n_gpus, hps):
net_dur_disc = None
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(
backend="nccl", init_method="env://", world_size=n_gpus, rank=rank
)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
if (
"use_mel_posterior_encoder" in hps.model.keys()
and hps.model.use_mel_posterior_encoder == True
):
print("Using mel posterior encoder for VITS2")
posterior_channels = 128 # vits2
hps.data.use_mel_posterior_encoder = True
else:
print("Using lin posterior encoder for VITS1")
posterior_channels = hps.data.filter_length // 2 + 1
hps.data.use_mel_posterior_encoder = False
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 500, 700, 900, 1100, 1300, 1500, 3000],
num_replicas=n_gpus,
rank=rank,
shuffle=True,
) | collate_fn = TextAudioSpeakerCollate() | 1 | 2023-12-16 05:34:02+00:00 | 12k |
camenduru/FreeInit-hf | animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n 
hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = 
torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
},
{
"identifier": "InflatedGroupNorm",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedGroupNorm(nn.GroupNorm):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint | 9,416 | output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if use_inflated_groupnorm:
self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
else:
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if use_inflated_groupnorm:
self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
else:
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 2 | 2023-12-19 21:06:32+00:00 | 12k |
zyrant/SPGroup3D | tools/data_converter/indoor_converter.py | [
{
"identifier": "S3DISData",
"path": "tools/data_converter/s3dis_data_utils.py",
"snippet": "class S3DISData(object):\n \"\"\"S3DIS data.\n\n Generate s3dis infos for s3dis_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'Area_1'.\n \"\"\"\n\n def __init__(self, root_path, split='Area_1'):\n self.root_dir = root_path\n self.split = split\n self.data_dir = osp.join(root_path,\n 'Stanford3dDataset_v1.2_Aligned_Version')\n\n # Following `GSDN <https://arxiv.org/abs/2006.12356>`_, use 5 furniture\n # classes for detection: table, chair, sofa, bookcase, board.\n self.cat_ids = np.array([7, 8, 9, 10, 11])\n self.cat_ids2class = {\n cat_id: i\n for i, cat_id in enumerate(list(self.cat_ids))\n }\n\n assert split in [\n 'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'\n ]\n self.sample_id_list = os.listdir(osp.join(self.data_dir,\n split)) # conferenceRoom_1\n for sample_id in self.sample_id_list:\n if os.path.isfile(osp.join(self.data_dir, split, sample_id)):\n self.sample_id_list.remove(sample_id)\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n info = dict()\n pc_info = {\n 'num_features': 6,\n 'lidar_idx': f'{self.split}_{sample_idx}'\n }\n info['point_cloud'] = pc_info\n pts_filename = osp.join(self.root_dir, 's3dis_data',\n f'{self.split}_{sample_idx}_point.npy')\n pts_instance_mask_path = osp.join(\n self.root_dir, 's3dis_data',\n f'{self.split}_{sample_idx}_ins_label.npy')\n pts_semantic_mask_path = osp.join(\n self.root_dir, 's3dis_data',\n f'{self.split}_{sample_idx}_sem_label.npy')\n\n points = np.load(pts_filename).astype(np.float32)\n pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int)\n pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int)\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))\n\n ##########################superpoint#######################\n # superpoints_filename = osp.join(self.root_dir, 's3dis_data',\n # f'{self.split}_{sample_idx}_superpoint.npy')\n # superpoints = np.load(superpoints_filename)\n # mmcv.mkdir_or_exist(osp.join(self.root_dir, 'superpoints'))\n # superpoints.tofile(\n # osp.join(self.root_dir, 'superpoints', f'{self.split}_{sample_idx}.bin'))\n info['pts_superpoints_path'] = osp.join('superpoints', f'{self.split}_{sample_idx}.bin')\n ###########################################################\n\n points.tofile(\n osp.join(self.root_dir, 'points',\n f'{self.split}_{sample_idx}.bin'))\n pts_instance_mask.tofile(\n osp.join(self.root_dir, 'instance_mask',\n f'{self.split}_{sample_idx}.bin'))\n pts_semantic_mask.tofile(\n osp.join(self.root_dir, 'semantic_mask',\n f'{self.split}_{sample_idx}.bin'))\n\n info['pts_path'] = osp.join('points',\n f'{self.split}_{sample_idx}.bin')\n info['pts_instance_mask_path'] = osp.join(\n 'instance_mask', f'{self.split}_{sample_idx}.bin')\n info['pts_semantic_mask_path'] = osp.join(\n 
'semantic_mask', f'{self.split}_{sample_idx}.bin')\n info['annos'] = self.get_bboxes(points, pts_instance_mask,\n pts_semantic_mask)\n\n return info\n\n sample_id_list = sample_id_list if sample_id_list is not None \\\n else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)\n\n def get_bboxes(self, points, pts_instance_mask, pts_semantic_mask):\n \"\"\"Convert instance masks to axis-aligned bounding boxes.\n\n Args:\n points (np.array): Scene points of shape (n, 6).\n pts_instance_mask (np.ndarray): Instance labels of shape (n,).\n pts_semantic_mask (np.ndarray): Semantic labels of shape (n,).\n\n Returns:\n dict: A dict containing detection infos with following keys:\n\n - gt_boxes_upright_depth (np.ndarray): Bounding boxes\n of shape (n, 6)\n - class (np.ndarray): Box labels of shape (n,)\n - gt_num (int): Number of boxes.\n \"\"\"\n bboxes, labels = [], []\n for i in range(1, pts_instance_mask.max() + 1):\n ids = pts_instance_mask == i\n # check if all instance points have same semantic label\n assert pts_semantic_mask[ids].min() == pts_semantic_mask[ids].max()\n label = pts_semantic_mask[ids][0]\n # keep only furniture objects\n if label in self.cat_ids2class:\n labels.append(self.cat_ids2class[pts_semantic_mask[ids][0]])\n pts = points[:, :3][ids]\n min_pts = pts.min(axis=0)\n max_pts = pts.max(axis=0)\n locations = (min_pts + max_pts) / 2\n dimensions = max_pts - min_pts\n bboxes.append(np.concatenate((locations, dimensions)))\n annotation = dict()\n # follow ScanNet and SUN RGB-D keys\n annotation['gt_boxes_upright_depth'] = np.array(bboxes)\n annotation['class'] = np.array(labels)\n annotation['gt_num'] = len(labels)\n return annotation"
},
{
"identifier": "S3DISSegData",
"path": "tools/data_converter/s3dis_data_utils.py",
"snippet": "class S3DISSegData(object):\n \"\"\"S3DIS dataset used to generate infos for semantic segmentation task.\n\n Args:\n data_root (str): Root path of the raw data.\n ann_file (str): The generated scannet infos.\n split (str, optional): Set split type of the data. Default: 'train'.\n num_points (int, optional): Number of points in each data input.\n Default: 8192.\n label_weight_func (function, optional): Function to compute the\n label weight. Default: None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_file,\n split='Area_1',\n num_points=4096,\n label_weight_func=None):\n self.data_root = data_root\n self.data_infos = mmcv.load(ann_file)\n self.split = split\n self.num_points = num_points\n\n self.all_ids = np.arange(13) # all possible ids\n self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12]) # used for seg task\n self.ignore_index = len(self.cat_ids)\n\n self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \\\n self.ignore_index\n for i, cat_id in enumerate(self.cat_ids):\n self.cat_id2class[cat_id] = i\n\n # label weighting function is taken from\n # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24\n self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \\\n label_weight_func is None else label_weight_func\n\n def get_seg_infos(self):\n scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()\n save_folder = osp.join(self.data_root, 'seg_info')\n mmcv.mkdir_or_exist(save_folder)\n np.save(\n osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),\n scene_idxs)\n np.save(\n osp.join(save_folder, f'{self.split}_label_weight.npy'),\n label_weight)\n print(f'{self.split} resampled scene index and label weight saved')\n\n def _convert_to_label(self, mask):\n \"\"\"Convert class_id in loaded segmentation mask to label.\"\"\"\n if isinstance(mask, str):\n if mask.endswith('npy'):\n mask = np.load(mask)\n else:\n mask = np.fromfile(mask, dtype=np.int64)\n label = self.cat_id2class[mask]\n return label\n\n def get_scene_idxs_and_label_weight(self):\n \"\"\"Compute scene_idxs for data sampling and label weight for loss\n calculation.\n\n We sample more times for scenes with more points. Label_weight is\n inversely proportional to number of class points.\n \"\"\"\n num_classes = len(self.cat_ids)\n num_point_all = []\n label_weight = np.zeros((num_classes + 1, )) # ignore_index\n for data_info in self.data_infos:\n label = self._convert_to_label(\n osp.join(self.data_root, data_info['pts_semantic_mask_path']))\n num_point_all.append(label.shape[0])\n class_count, _ = np.histogram(label, range(num_classes + 2))\n label_weight += class_count\n\n # repeat scene_idx for num_scene_point // num_sample_point times\n sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))\n num_iter = int(np.sum(num_point_all) / float(self.num_points))\n scene_idxs = []\n for idx in range(len(self.data_infos)):\n scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))\n scene_idxs = np.array(scene_idxs).astype(np.int32)\n\n # calculate label weight, adopted from PointNet++\n label_weight = label_weight[:-1].astype(np.float32)\n label_weight = label_weight / label_weight.sum()\n label_weight = self.label_weight_func(label_weight).astype(np.float32)\n\n return scene_idxs, label_weight"
},
{
"identifier": "ScanNetData",
"path": "tools/data_converter/scannet_data_utils.py",
"snippet": "class ScanNetData(object):\n \"\"\"ScanNet data.\n\n Generate scannet infos for scannet_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'train'.\n \"\"\"\n\n def __init__(self, root_path, split='train'):\n self.root_dir = root_path\n self.split = split\n self.split_dir = osp.join(root_path)\n self.classes = [\n 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin'\n ]\n self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}\n self.label2cat = {self.cat2label[t]: t for t in self.cat2label}\n self.cat_ids = np.array(\n [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])\n self.cat_ids2class = {\n nyu40id: i\n for i, nyu40id in enumerate(list(self.cat_ids))\n }\n assert split in ['train', 'val', 'test']\n split_file = osp.join(self.root_dir, 'meta_data',\n f'scannetv2_{split}.txt')\n mmcv.check_file_exist(split_file)\n self.sample_id_list = mmcv.list_from_file(split_file)\n self.test_mode = (split == 'test')\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_aligned_box_label(self, idx):\n box_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_aligned_bbox.npy')\n mmcv.check_file_exist(box_file)\n return np.load(box_file)\n\n def get_unaligned_box_label(self, idx):\n box_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_unaligned_bbox.npy')\n mmcv.check_file_exist(box_file)\n return np.load(box_file)\n\n def get_axis_align_matrix(self, idx):\n matrix_file = osp.join(self.root_dir, 'scannet_instance_data',\n f'{idx}_axis_align_matrix.npy')\n mmcv.check_file_exist(matrix_file)\n return np.load(matrix_file)\n\n def get_images(self, idx):\n paths = []\n path = osp.join(self.root_dir, 'posed_images', idx)\n for file in sorted(os.listdir(path)):\n if file.endswith('.jpg'):\n paths.append(osp.join('posed_images', idx, file))\n return paths\n\n def get_extrinsics(self, idx):\n extrinsics = []\n path = osp.join(self.root_dir, 'posed_images', idx)\n for file in sorted(os.listdir(path)):\n if file.endswith('.txt') and not file == 'intrinsic.txt':\n extrinsics.append(np.loadtxt(osp.join(path, file)))\n return extrinsics\n\n def get_intrinsics(self, idx):\n matrix_file = osp.join(self.root_dir, 'posed_images', idx,\n 'intrinsic.txt')\n mmcv.check_file_exist(matrix_file)\n return np.loadtxt(matrix_file)\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n info = dict()\n pc_info = {'num_features': 6, 'lidar_idx': sample_idx}\n info['point_cloud'] = pc_info\n pts_filename = osp.join(self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_vert.npy')\n points = np.load(pts_filename)\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n points.tofile(\n osp.join(self.root_dir, 'points', f'{sample_idx}.bin'))\n info['pts_path'] = osp.join('points', f'{sample_idx}.bin')\n\n 
##########################superpoint#######################\n superpoints_filename = osp.join(self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_superpoint.npy')\n superpoints = np.load(superpoints_filename)\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'superpoints'))\n superpoints.tofile(\n osp.join(self.root_dir, 'superpoints', f'{sample_idx}.bin'))\n info['pts_superpoints_path'] = osp.join('superpoints', f'{sample_idx}.bin')\n ###########################################################\n\n # update with RGB image paths if exist\n if os.path.exists(osp.join(self.root_dir, 'posed_images')):\n info['intrinsics'] = self.get_intrinsics(sample_idx)\n all_extrinsics = self.get_extrinsics(sample_idx)\n all_img_paths = self.get_images(sample_idx)\n # some poses in ScanNet are invalid\n extrinsics, img_paths = [], []\n for extrinsic, img_path in zip(all_extrinsics, all_img_paths):\n if np.all(np.isfinite(extrinsic)):\n img_paths.append(img_path)\n extrinsics.append(extrinsic)\n info['extrinsics'] = extrinsics\n info['img_paths'] = img_paths\n\n if not self.test_mode:\n pts_instance_mask_path = osp.join(\n self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_ins_label.npy')\n pts_semantic_mask_path = osp.join(\n self.root_dir, 'scannet_instance_data',\n f'{sample_idx}_sem_label.npy')\n\n pts_instance_mask = np.load(pts_instance_mask_path).astype(\n np.int64)\n pts_semantic_mask = np.load(pts_semantic_mask_path).astype(\n np.int64)\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))\n\n pts_instance_mask.tofile(\n osp.join(self.root_dir, 'instance_mask',\n f'{sample_idx}.bin'))\n pts_semantic_mask.tofile(\n osp.join(self.root_dir, 'semantic_mask',\n f'{sample_idx}.bin'))\n\n info['pts_instance_mask_path'] = osp.join(\n 'instance_mask', f'{sample_idx}.bin')\n info['pts_semantic_mask_path'] = osp.join(\n 'semantic_mask', f'{sample_idx}.bin')\n\n if has_label:\n annotations = {}\n # box is of shape [k, 6 + class]\n aligned_box_label = self.get_aligned_box_label(sample_idx)\n unaligned_box_label = self.get_unaligned_box_label(sample_idx)\n annotations['gt_num'] = aligned_box_label.shape[0]\n if annotations['gt_num'] != 0:\n aligned_box = aligned_box_label[:, :-1] # k, 6\n unaligned_box = unaligned_box_label[:, :-1]\n classes = aligned_box_label[:, -1] # k\n annotations['name'] = np.array([\n self.label2cat[self.cat_ids2class[classes[i]]]\n for i in range(annotations['gt_num'])\n ])\n # default names are given to aligned bbox for compatibility\n # we also save unaligned bbox info with marked names\n annotations['location'] = aligned_box[:, :3]\n annotations['dimensions'] = aligned_box[:, 3:6]\n annotations['gt_boxes_upright_depth'] = aligned_box\n annotations['unaligned_location'] = unaligned_box[:, :3]\n annotations['unaligned_dimensions'] = unaligned_box[:, 3:6]\n annotations[\n 'unaligned_gt_boxes_upright_depth'] = unaligned_box\n annotations['index'] = np.arange(\n annotations['gt_num'], dtype=np.int32)\n annotations['class'] = np.array([\n self.cat_ids2class[classes[i]]\n for i in range(annotations['gt_num'])\n ])\n axis_align_matrix = self.get_axis_align_matrix(sample_idx)\n annotations['axis_align_matrix'] = axis_align_matrix # 4x4\n info['annos'] = annotations\n return info\n\n sample_id_list = sample_id_list if sample_id_list is not None \\\n else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)"
},
{
"identifier": "ScanNetSegData",
"path": "tools/data_converter/scannet_data_utils.py",
"snippet": "class ScanNetSegData(object):\n \"\"\"ScanNet dataset used to generate infos for semantic segmentation task.\n\n Args:\n data_root (str): Root path of the raw data.\n ann_file (str): The generated scannet infos.\n split (str, optional): Set split type of the data. Default: 'train'.\n num_points (int, optional): Number of points in each data input.\n Default: 8192.\n label_weight_func (function, optional): Function to compute the\n label weight. Default: None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_file,\n split='train',\n num_points=8192,\n label_weight_func=None):\n self.data_root = data_root\n self.data_infos = mmcv.load(ann_file)\n self.split = split\n assert split in ['train', 'val', 'test']\n self.num_points = num_points\n\n self.all_ids = np.arange(41) # all possible ids\n self.cat_ids = np.array([\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,\n 39\n ]) # used for seg task\n self.ignore_index = len(self.cat_ids)\n\n self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \\\n self.ignore_index\n for i, cat_id in enumerate(self.cat_ids):\n self.cat_id2class[cat_id] = i\n\n # label weighting function is taken from\n # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24\n self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \\\n label_weight_func is None else label_weight_func\n\n def get_seg_infos(self):\n if self.split == 'test':\n return\n scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()\n save_folder = osp.join(self.data_root, 'seg_info')\n mmcv.mkdir_or_exist(save_folder)\n np.save(\n osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),\n scene_idxs)\n np.save(\n osp.join(save_folder, f'{self.split}_label_weight.npy'),\n label_weight)\n print(f'{self.split} resampled scene index and label weight saved')\n\n def _convert_to_label(self, mask):\n \"\"\"Convert class_id in loaded segmentation mask to label.\"\"\"\n if isinstance(mask, str):\n if mask.endswith('npy'):\n mask = np.load(mask)\n else:\n mask = np.fromfile(mask, dtype=np.int64)\n label = self.cat_id2class[mask]\n return label\n\n def get_scene_idxs_and_label_weight(self):\n \"\"\"Compute scene_idxs for data sampling and label weight for loss\n calculation.\n\n We sample more times for scenes with more points. Label_weight is\n inversely proportional to number of class points.\n \"\"\"\n num_classes = len(self.cat_ids)\n num_point_all = []\n label_weight = np.zeros((num_classes + 1, )) # ignore_index\n for data_info in self.data_infos:\n label = self._convert_to_label(\n osp.join(self.data_root, data_info['pts_semantic_mask_path']))\n num_point_all.append(label.shape[0])\n class_count, _ = np.histogram(label, range(num_classes + 2))\n label_weight += class_count\n\n # repeat scene_idx for num_scene_point // num_sample_point times\n sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))\n num_iter = int(np.sum(num_point_all) / float(self.num_points))\n scene_idxs = []\n for idx in range(len(self.data_infos)):\n scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))\n scene_idxs = np.array(scene_idxs).astype(np.int32)\n\n # calculate label weight, adopted from PointNet++\n label_weight = label_weight[:-1].astype(np.float32)\n label_weight = label_weight / label_weight.sum()\n label_weight = self.label_weight_func(label_weight).astype(np.float32)\n\n return scene_idxs, label_weight"
},
{
"identifier": "SUNRGBDData",
"path": "tools/data_converter/sunrgbd_data_utils.py",
"snippet": "class SUNRGBDData(object):\n \"\"\"SUNRGBD data.\n\n Generate scannet infos for sunrgbd_converter.\n\n Args:\n root_path (str): Root path of the raw data.\n split (str, optional): Set split type of the data. Default: 'train'.\n use_v1 (bool, optional): Whether to use v1. Default: False.\n \"\"\"\n\n def __init__(self, root_path, split='train', use_v1=False):\n self.root_dir = root_path\n self.split = split\n self.split_dir = osp.join(root_path, 'sunrgbd_trainval')\n self.classes = [\n 'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',\n 'night_stand', 'bookshelf', 'bathtub'\n ]\n self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}\n self.label2cat = {\n label: self.classes[label]\n for label in range(len(self.classes))\n }\n assert split in ['train', 'val', 'test']\n split_file = osp.join(self.split_dir, f'{split}_data_idx.txt')\n mmcv.check_file_exist(split_file)\n self.sample_id_list = map(int, mmcv.list_from_file(split_file))\n self.image_dir = osp.join(self.split_dir, 'image')\n self.calib_dir = osp.join(self.split_dir, 'calib')\n self.depth_dir = osp.join(self.split_dir, 'depth')\n if use_v1:\n self.label_dir = osp.join(self.split_dir, 'label_v1')\n else:\n self.label_dir = osp.join(self.split_dir, 'label')\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def get_image(self, idx):\n img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg')\n return mmcv.imread(img_filename)\n\n def get_image_shape(self, idx):\n image = self.get_image(idx)\n return np.array(image.shape[:2], dtype=np.int32)\n\n def get_depth(self, idx):\n depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat')\n depth = sio.loadmat(depth_filename)['instance']\n return depth\n\n def get_calibration(self, idx):\n calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt')\n lines = [line.rstrip() for line in open(calib_filepath)]\n Rt = np.array([float(x) for x in lines[0].split(' ')])\n Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32)\n K = np.array([float(x) for x in lines[1].split(' ')])\n K = np.reshape(K, (3, 3), order='F').astype(np.float32)\n return K, Rt\n\n def get_label_objects(self, idx):\n label_filename = osp.join(self.label_dir, f'{idx:06d}.txt')\n lines = [line.rstrip() for line in open(label_filename)]\n objects = [SUNRGBDInstance(line) for line in lines]\n return objects\n\n def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):\n \"\"\"Get data infos.\n\n This method gets information from the raw data.\n\n Args:\n num_workers (int, optional): Number of threads to be used.\n Default: 4.\n has_label (bool, optional): Whether the data has label.\n Default: True.\n sample_id_list (list[int], optional): Index list of the sample.\n Default: None.\n\n Returns:\n infos (list[dict]): Information of the raw data.\n \"\"\"\n\n def process_single_scene(sample_idx):\n print(f'{self.split} sample_idx: {sample_idx}')\n # convert depth to points\n # SAMPLE_NUM = 50000 # we do not down sampling points\n # TODO: Check whether can move the point\n # sampling process during training.\n pc_upright_depth = self.get_depth(sample_idx)\n # pc_upright_depth_subsampled = random_sampling(\n # pc_upright_depth, SAMPLE_NUM)\n pc_upright_depth_subsampled = pc_upright_depth\n\n info = dict()\n pc_info = {'num_features': 6, 'lidar_idx': sample_idx}\n info['point_cloud'] = pc_info\n\n mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))\n pc_upright_depth_subsampled.tofile(\n osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin'))\n\n 
info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin')\n \n ##########################superpoint#######################\n info['pts_superpoints_path'] = osp.join('superpoints', f'{sample_idx:06d}.bin')\n ###########################################################\n \n img_path = osp.join('image', f'{sample_idx:06d}.jpg')\n image_info = {\n 'image_idx': sample_idx,\n 'image_shape': self.get_image_shape(sample_idx),\n 'image_path': img_path\n }\n info['image'] = image_info\n\n K, Rt = self.get_calibration(sample_idx)\n calib_info = {'K': K, 'Rt': Rt}\n info['calib'] = calib_info\n\n if has_label:\n obj_list = self.get_label_objects(sample_idx)\n annotations = {}\n annotations['gt_num'] = len([\n obj.classname for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n if annotations['gt_num'] != 0:\n annotations['name'] = np.array([\n obj.classname for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n annotations['bbox'] = np.concatenate([\n obj.box2d.reshape(1, 4) for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ],\n axis=0)\n annotations['location'] = np.concatenate([\n obj.centroid.reshape(1, 3) for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ],\n axis=0)\n annotations['dimensions'] = 2 * np.array([\n [obj.length, obj.width, obj.height] for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ]) # lwh (depth) format\n annotations['rotation_y'] = np.array([\n obj.heading_angle for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n annotations['index'] = np.arange(\n len(obj_list), dtype=np.int32)\n annotations['class'] = np.array([\n self.cat2label[obj.classname] for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ])\n annotations['gt_boxes_upright_depth'] = np.stack(\n [\n obj.box3d for obj in obj_list\n if obj.classname in self.cat2label.keys()\n ],\n axis=0) # (K,8)\n info['annos'] = annotations\n return info\n\n sample_id_list = sample_id_list if \\\n sample_id_list is not None else self.sample_id_list\n with futures.ThreadPoolExecutor(num_workers) as executor:\n infos = executor.map(process_single_scene, sample_id_list)\n return list(infos)"
}
] | import os
import mmcv
import numpy as np
from tools.data_converter.s3dis_data_utils import S3DISData, S3DISSegData
from tools.data_converter.scannet_data_utils import ScanNetData, ScanNetSegData
from tools.data_converter.scannet_md40_data_utils import ScanNetData_md40, ScanNetSegData_md40
from tools.data_converter.sunrgbd_data_utils import SUNRGBDData | 8,711 | # Copyright (c) OpenMMLab. All rights reserved.
def create_indoor_info_file(data_path,
pkl_prefix='sunrgbd',
save_path=None,
use_v1=False,
workers=4):
"""Create indoor information file.
Get information of the raw data and save it to the pkl file.
Args:
data_path (str): Path of the data.
pkl_prefix (str, optional): Prefix of the pkl to be saved.
Default: 'sunrgbd'.
save_path (str, optional): Path of the pkl to be saved. Default: None.
use_v1 (bool, optional): Whether to use v1. Default: False.
workers (int, optional): Number of threads to be used. Default: 4.
"""
assert os.path.exists(data_path)
assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis', 'scannet_md40'], \
f'unsupported indoor dataset {pkl_prefix}'
save_path = data_path if save_path is None else save_path
assert os.path.exists(save_path)
    # generate infos for both the detection and segmentation tasks
if pkl_prefix in ['sunrgbd', 'scannet', 'scannet_md40']:
train_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_train.pkl')
val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
if pkl_prefix == 'sunrgbd':
# SUN RGB-D has a train-val split
train_dataset = SUNRGBDData(
root_path=data_path, split='train', use_v1=use_v1)
val_dataset = SUNRGBDData(
root_path=data_path, split='val', use_v1=use_v1)
elif pkl_prefix == 'scannet':
# ScanNet has a train-val-test split
train_dataset = ScanNetData(root_path=data_path, split='train')
val_dataset = ScanNetData(root_path=data_path, split='val')
test_dataset = ScanNetData(root_path=data_path, split='test')
test_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_test.pkl')
else:
            # ScanNet-md40 also has a train-val-test split
train_dataset = ScanNetData_md40(root_path=data_path, split='train')
val_dataset = ScanNetData_md40(root_path=data_path, split='val')
test_dataset = ScanNetData_md40(root_path=data_path, split='test')
test_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_test.pkl')
infos_train = train_dataset.get_infos(
num_workers=workers, has_label=True)
mmcv.dump(infos_train, train_filename, 'pkl')
print(f'{pkl_prefix} info train file is saved to {train_filename}')
infos_val = val_dataset.get_infos(num_workers=workers, has_label=True)
mmcv.dump(infos_val, val_filename, 'pkl')
print(f'{pkl_prefix} info val file is saved to {val_filename}')
        if pkl_prefix in ['scannet', 'scannet_md40']:
            infos_test = test_dataset.get_infos(
                num_workers=workers, has_label=False)
            mmcv.dump(infos_test, test_filename, 'pkl')
            print(f'{pkl_prefix} info test file is saved to {test_filename}')
# generate infos for the semantic segmentation task
# e.g. re-sampled scene indexes and label weights
# scene indexes are used to re-sample rooms with different number of points
# label weights are used to balance classes with different number of points
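    # Illustrative example (not part of the original code): with normalized class
    # frequencies [0.5, 0.3, 0.2], the weights 1 / ln(1.2 + x) are roughly
    # [1.88, 2.47, 2.97], so rarer classes receive larger loss weights.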
if pkl_prefix == 'scannet':
# label weight computation function is adopted from
# https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
| # Copyright (c) OpenMMLab. All rights reserved.
def create_indoor_info_file(data_path,
pkl_prefix='sunrgbd',
save_path=None,
use_v1=False,
workers=4):
"""Create indoor information file.
Get information of the raw data and save it to the pkl file.
Args:
data_path (str): Path of the data.
pkl_prefix (str, optional): Prefix of the pkl to be saved.
Default: 'sunrgbd'.
save_path (str, optional): Path of the pkl to be saved. Default: None.
use_v1 (bool, optional): Whether to use v1. Default: False.
workers (int, optional): Number of threads to be used. Default: 4.
"""
assert os.path.exists(data_path)
assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis', 'scannet_md40'], \
f'unsupported indoor dataset {pkl_prefix}'
save_path = data_path if save_path is None else save_path
assert os.path.exists(save_path)
    # generate infos for both the detection and segmentation tasks
if pkl_prefix in ['sunrgbd', 'scannet', 'scannet_md40']:
train_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_train.pkl')
val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
if pkl_prefix == 'sunrgbd':
# SUN RGB-D has a train-val split
train_dataset = SUNRGBDData(
root_path=data_path, split='train', use_v1=use_v1)
val_dataset = SUNRGBDData(
root_path=data_path, split='val', use_v1=use_v1)
elif pkl_prefix == 'scannet':
# ScanNet has a train-val-test split
train_dataset = ScanNetData(root_path=data_path, split='train')
val_dataset = ScanNetData(root_path=data_path, split='val')
test_dataset = ScanNetData(root_path=data_path, split='test')
test_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_test.pkl')
else:
            # ScanNet-md40 also has a train-val-test split
train_dataset = ScanNetData_md40(root_path=data_path, split='train')
val_dataset = ScanNetData_md40(root_path=data_path, split='val')
test_dataset = ScanNetData_md40(root_path=data_path, split='test')
test_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_test.pkl')
infos_train = train_dataset.get_infos(
num_workers=workers, has_label=True)
mmcv.dump(infos_train, train_filename, 'pkl')
print(f'{pkl_prefix} info train file is saved to {train_filename}')
infos_val = val_dataset.get_infos(num_workers=workers, has_label=True)
mmcv.dump(infos_val, val_filename, 'pkl')
print(f'{pkl_prefix} info val file is saved to {val_filename}')
        if pkl_prefix in ['scannet', 'scannet_md40']:
            infos_test = test_dataset.get_infos(
                num_workers=workers, has_label=False)
            mmcv.dump(infos_test, test_filename, 'pkl')
            print(f'{pkl_prefix} info test file is saved to {test_filename}')
# generate infos for the semantic segmentation task
# e.g. re-sampled scene indexes and label weights
# scene indexes are used to re-sample rooms with different number of points
# label weights are used to balance classes with different number of points
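    # Illustrative example (not part of the original code): with normalized class
    # frequencies [0.5, 0.3, 0.2], the weights 1 / ln(1.2 + x) are roughly
    # [1.88, 2.47, 2.97], so rarer classes receive larger loss weights.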
if pkl_prefix == 'scannet':
# label weight computation function is adopted from
# https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 | train_dataset = ScanNetSegData( | 3 | 2023-12-21 12:50:35+00:00 | 12k |
jdejaegh/irm-kmi-ha | custom_components/irm_kmi/coordinator.py | [
{
"identifier": "IrmKmiApiClient",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiClient:\n \"\"\"API client for IRM KMI weather data\"\"\"\n COORD_DECIMALS = 6\n\n def __init__(self, session: aiohttp.ClientSession) -> None:\n self._session = session\n self._base_url = \"https://app.meteo.be/services/appv4/\"\n\n async def get_forecasts_coord(self, coord: dict) -> dict:\n \"\"\"Get forecasts for given city.\"\"\"\n assert 'lat' in coord\n assert 'long' in coord\n coord['lat'] = round(coord['lat'], self.COORD_DECIMALS)\n coord['long'] = round(coord['long'], self.COORD_DECIMALS)\n\n response = await self._api_wrapper(params={\"s\": \"getForecasts\", \"k\": _api_key(\"getForecasts\")} | coord)\n return await response.json()\n\n async def get_image(self, url, params: dict | None = None) -> bytes:\n \"\"\"Get the image at the specified url with the parameters\"\"\"\n r: ClientResponse = await self._api_wrapper(base_url=url, params={} if params is None else params)\n return await r.read()\n\n async def _api_wrapper(\n self,\n params: dict,\n base_url: str | None = None,\n path: str = \"\",\n method: str = \"get\",\n data: dict | None = None,\n headers: dict | None = None,\n ) -> any:\n \"\"\"Get information from the API.\"\"\"\n\n try:\n async with async_timeout.timeout(10):\n response = await self._session.request(\n method=method,\n url=f\"{self._base_url if base_url is None else base_url}{path}\",\n headers=headers,\n json=data,\n params=params\n )\n response.raise_for_status()\n return response\n\n except asyncio.TimeoutError as exception:\n raise IrmKmiApiCommunicationError(\"Timeout error fetching information\") from exception\n except (aiohttp.ClientError, socket.gaierror) as exception:\n raise IrmKmiApiCommunicationError(\"Error fetching information\") from exception\n except Exception as exception: # pylint: disable=broad-except\n raise IrmKmiApiError(f\"Something really wrong happened! {exception}\") from exception"
},
{
"identifier": "IrmKmiApiError",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiError(Exception):\n \"\"\"Exception to indicate a general API error.\"\"\""
},
{
"identifier": "CONF_DARK_MODE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_DARK_MODE: Final = \"dark_mode\""
},
{
"identifier": "CONF_STYLE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_STYLE: Final = \"style\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/irm_kmi/const.py",
"snippet": "DOMAIN: Final = 'irm_kmi'"
},
{
"identifier": "IRM_KMI_TO_HA_CONDITION_MAP",
"path": "custom_components/irm_kmi/const.py",
"snippet": "IRM_KMI_TO_HA_CONDITION_MAP: Final = {\n (0, 'd'): ATTR_CONDITION_SUNNY,\n (0, 'n'): ATTR_CONDITION_CLEAR_NIGHT,\n (1, 'd'): ATTR_CONDITION_SUNNY,\n (1, 'n'): ATTR_CONDITION_CLEAR_NIGHT,\n (2, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (2, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (3, 'd'): ATTR_CONDITION_PARTLYCLOUDY,\n (3, 'n'): ATTR_CONDITION_PARTLYCLOUDY,\n (4, 'd'): ATTR_CONDITION_POURING,\n (4, 'n'): ATTR_CONDITION_POURING,\n (5, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (5, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (6, 'd'): ATTR_CONDITION_POURING,\n (6, 'n'): ATTR_CONDITION_POURING,\n (7, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (7, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (8, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (8, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (9, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (9, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (10, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (10, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (11, 'd'): ATTR_CONDITION_SNOWY,\n (11, 'n'): ATTR_CONDITION_SNOWY,\n (12, 'd'): ATTR_CONDITION_SNOWY,\n (12, 'n'): ATTR_CONDITION_SNOWY,\n (13, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (13, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (14, 'd'): ATTR_CONDITION_CLOUDY,\n (14, 'n'): ATTR_CONDITION_CLOUDY,\n (15, 'd'): ATTR_CONDITION_CLOUDY,\n (15, 'n'): ATTR_CONDITION_CLOUDY,\n (16, 'd'): ATTR_CONDITION_POURING,\n (16, 'n'): ATTR_CONDITION_POURING,\n (17, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (17, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (18, 'd'): ATTR_CONDITION_RAINY,\n (18, 'n'): ATTR_CONDITION_RAINY,\n (19, 'd'): ATTR_CONDITION_POURING,\n (19, 'n'): ATTR_CONDITION_POURING,\n (20, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (20, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (21, 'd'): ATTR_CONDITION_EXCEPTIONAL,\n (21, 'n'): ATTR_CONDITION_EXCEPTIONAL,\n (22, 'd'): ATTR_CONDITION_SNOWY,\n (22, 'n'): ATTR_CONDITION_SNOWY,\n (23, 'd'): ATTR_CONDITION_SNOWY,\n (23, 'n'): ATTR_CONDITION_SNOWY,\n (24, 'd'): ATTR_CONDITION_FOG,\n (24, 'n'): ATTR_CONDITION_FOG,\n (25, 'd'): ATTR_CONDITION_FOG,\n (25, 'n'): ATTR_CONDITION_FOG,\n (26, 'd'): ATTR_CONDITION_FOG,\n (26, 'n'): ATTR_CONDITION_FOG,\n (27, 'd'): ATTR_CONDITION_EXCEPTIONAL,\n (27, 'n'): ATTR_CONDITION_EXCEPTIONAL\n}"
},
{
"identifier": "LANGS",
"path": "custom_components/irm_kmi/const.py",
"snippet": "LANGS: Final = ['en', 'fr', 'nl', 'de']"
},
{
"identifier": "MAP_WARNING_ID_TO_SLUG",
"path": "custom_components/irm_kmi/const.py",
"snippet": "MAP_WARNING_ID_TO_SLUG: Final = {\n 0: 'wind',\n 1: 'rain',\n 2: 'ice_or_snow',\n 3: 'thunder',\n 7: 'fog',\n 9: 'cold',\n 12: 'thunder_wind_rain',\n 13: 'thunderstorm_strong_gusts',\n 14: 'thunderstorm_large_rainfall',\n 15: 'storm_surge',\n 17: 'coldspell'}"
},
{
"identifier": "OPTION_STYLE_SATELLITE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OPTION_STYLE_SATELLITE: Final = 'satellite_style'"
},
{
"identifier": "OUT_OF_BENELUX",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OUT_OF_BENELUX: Final = [\"außerhalb der Benelux (Brussels)\",\n \"Hors de Belgique (Bxl)\",\n \"Outside the Benelux (Brussels)\",\n \"Buiten de Benelux (Brussel)\"]"
},
{
"identifier": "STYLE_TO_PARAM_MAP",
"path": "custom_components/irm_kmi/const.py",
"snippet": "STYLE_TO_PARAM_MAP: Final = {\n OPTION_STYLE_STD: 1,\n OPTION_STYLE_CONTRAST: 2,\n OPTION_STYLE_YELLOW_RED: 3,\n OPTION_STYLE_SATELLITE: 4\n}"
},
{
"identifier": "AnimationFrameData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class AnimationFrameData(TypedDict, total=False):\n \"\"\"Holds one single frame of the radar camera, along with the timestamp of the frame\"\"\"\n time: datetime | None\n image: bytes | None\n value: float | None\n position: float | None\n position_higher: float | None\n position_lower: float | None"
},
{
"identifier": "CurrentWeatherData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class CurrentWeatherData(TypedDict, total=False):\n \"\"\"Class to hold the currently observable weather at a given location\"\"\"\n condition: str | None\n temperature: float | None\n wind_speed: float | None\n wind_gust_speed: float | None\n wind_bearing: float | str | None\n uv_index: float | None\n pressure: float | None"
},
{
"identifier": "IrmKmiForecast",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class IrmKmiForecast(Forecast):\n \"\"\"Forecast class with additional attributes for IRM KMI\"\"\"\n\n # TODO: add condition_2 as well and evolution to match data from the API?\n # TODO: remove the _fr and _nl to have only one 'text' attribute\n text_fr: str | None\n text_nl: str | None"
},
{
"identifier": "ProcessedCoordinatorData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class ProcessedCoordinatorData(TypedDict, total=False):\n \"\"\"Data class that will be exposed to the entities consuming data from an IrmKmiCoordinator\"\"\"\n current_weather: CurrentWeatherData\n hourly_forecast: List[Forecast] | None\n daily_forecast: List[IrmKmiForecast] | None\n animation: RadarAnimationData\n warnings: List[WarningData] | None"
},
{
"identifier": "RadarAnimationData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class RadarAnimationData(TypedDict, total=False):\n \"\"\"Holds frames and additional data for the animation to be rendered\"\"\"\n sequence: List[AnimationFrameData] | None\n most_recent_image_idx: int | None\n hint: str | None\n unit: str | None\n location: bytes | None\n svg_still: bytes | None\n svg_animated: bytes | None"
},
{
"identifier": "WarningData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class WarningData(TypedDict, total=False):\n \"\"\"Holds data about a specific warning\"\"\"\n slug: str\n id: int\n level: int\n friendly_name: str\n text: str\n starts_at: datetime\n ends_at: datetime"
},
{
"identifier": "RainGraph",
"path": "custom_components/irm_kmi/rain_graph.py",
"snippet": "class RainGraph:\n def __init__(self,\n animation_data: RadarAnimationData,\n background_image_path: str,\n background_size: (int, int),\n dark_mode: bool = False,\n tz: str = 'UTC',\n svg_width: float = 640,\n inset: float = 20,\n graph_height: float = 150,\n top_text_space: float = 30,\n top_text_y_pos: float = 20,\n bottom_text_space: float = 50,\n bottom_text_y_pos: float = 218,\n auto=True\n ):\n\n self._animation_data: RadarAnimationData = animation_data\n self._background_image_path: str = background_image_path\n self._background_size: (int, int) = background_size\n self._dark_mode: bool = dark_mode\n self._tz = pytz.timezone(tz)\n self._svg_width: float = svg_width\n self._inset: float = inset\n self._graph_height: float = graph_height\n self._top_text_space: float = top_text_space + background_size[1]\n self._top_text_y_pos: float = top_text_y_pos + background_size[1]\n self._bottom_text_space: float = bottom_text_space\n self._bottom_text_y_pos: float = bottom_text_y_pos + background_size[1]\n\n self._frame_count: int = len(self._animation_data['sequence'])\n self._graph_width: float = self._svg_width - 2 * self._inset\n self._graph_bottom: float = self._top_text_space + self._graph_height\n self._svg_height: float = self._graph_height + self._top_text_space + self._bottom_text_space\n self._interval_width: float = self._graph_width / self._frame_count\n self._offset: float = self._inset + self._interval_width / 2\n\n if not (0 <= self._top_text_y_pos <= self._top_text_space):\n raise ValueError(\"It must hold that 0 <= top_text_y_pos <= top_text_space\")\n\n if not (self._graph_bottom <= self._bottom_text_y_pos <= self._graph_bottom + self._bottom_text_space):\n raise ValueError(\"bottom_text_y_pos must be below the graph\")\n\n self._dwg: Drawing = Drawing(size=(self._svg_width, self._svg_height), profile='full')\n self._dwg_save: Drawing\n self._dwg_animated: Drawing\n self._dwg_still: Drawing\n\n if auto:\n self.draw_svg_frame()\n self.draw_hour_bars()\n self.draw_chances_path()\n self.draw_data_line()\n self.write_hint()\n self.insert_background()\n self._dwg_save = copy.deepcopy(self._dwg)\n\n self.draw_current_fame_line()\n self.draw_description_text()\n self.insert_cloud_layer()\n self.draw_location()\n self._dwg_animated = self._dwg\n\n self._dwg = self._dwg_save\n idx = self._animation_data['most_recent_image_idx']\n self.draw_current_fame_line(idx)\n self.draw_description_text(idx)\n self.insert_cloud_layer(idx)\n self.draw_location()\n self._dwg_still = self._dwg\n\n def draw_svg_frame(self):\n \"\"\"Create the global area to draw the other items\"\"\"\n self._dwg.embed_font(name=\"Roboto Medium\", filename='custom_components/irm_kmi/resources/roboto_medium.ttf')\n self._dwg.embed_stylesheet(\"\"\"\n .roboto {\n font-family: \"Roboto Medium\";\n }\n \"\"\")\n\n fill_color = '#393C40' if self._dark_mode else '#385E95'\n self._dwg.add(self._dwg.rect(insert=(0, 0),\n size=(self._svg_width, self._svg_height),\n rx=None, ry=None,\n fill=fill_color, stroke='none'))\n\n def draw_description_text(self, idx: int | None = None):\n \"\"\"For every frame write the amount of precipitation and the time at the top of the graph.\n If idx is set, only do it for the given idx\"\"\"\n\n times = [e['time'].astimezone(tz=self._tz).strftime('%H:%M') for e in\n self._animation_data['sequence']]\n rain_levels = [f\"{e['value']}{self._animation_data['unit']}\" for e in self._animation_data['sequence']]\n\n if idx is not None:\n time = times[idx]\n rain_level = rain_levels[idx]\n\n 
paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n self.write_time_and_rain(paragraph, rain_level, time)\n return\n\n for i in range(self._frame_count):\n time = times[i]\n rain_level = rain_levels[i]\n\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n values = ['hidden'] * self._frame_count\n values[i] = 'visible'\n\n paragraph.add(Animate(\n attributeName=\"visibility\",\n values=\";\".join(values),\n dur=f\"{self._frame_count * 0.3}s\",\n begin=\"0s\",\n repeatCount=\"indefinite\"\n ))\n\n self.write_time_and_rain(paragraph, rain_level, time)\n\n def write_time_and_rain(self, paragraph, rain_level, time):\n \"\"\"Using the paragraph object, write the time and rain level data\"\"\"\n paragraph.add(self._dwg.text(f\"{time}\", insert=(self._offset, self._top_text_y_pos),\n text_anchor=\"start\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n paragraph.add(self._dwg.text(f\"{rain_level}\", insert=(self._svg_width / 2, self._top_text_y_pos),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n def write_hint(self):\n \"\"\"Add the hint text at the bottom of the graph\"\"\"\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n hint = self._animation_data['hint']\n\n paragraph.add(self._dwg.text(f\"{hint}\", insert=(self._svg_width / 2, self._bottom_text_y_pos),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n def draw_chances_path(self):\n \"\"\"Draw the prevision margin area around the main forecast line\"\"\"\n list_lower_points = []\n list_higher_points = []\n\n rain_list: List[AnimationFrameData] = self._animation_data['sequence']\n graph_rect_left = self._offset\n graph_rect_top = self._top_text_space\n\n for i in range(len(rain_list)):\n position_higher = rain_list[i]['position_higher']\n if position_higher is not None:\n list_higher_points.append((graph_rect_left, graph_rect_top + (\n 1.0 - position_higher) * self._graph_height))\n graph_rect_left += self._interval_width\n\n graph_rect_right = graph_rect_left - self._interval_width\n for i in range(len(rain_list) - 1, -1, -1):\n position_lower = rain_list[i]['position_lower']\n if position_lower is not None:\n list_lower_points.append((graph_rect_right, graph_rect_top + (\n 1.0 - position_lower) * self._graph_height))\n graph_rect_right -= self._interval_width\n\n if list_higher_points and list_lower_points:\n self.draw_chance_precip(list_higher_points, list_lower_points)\n\n def draw_chance_precip(self, list_higher_points: List, list_lower_points: List):\n \"\"\"Draw the blue solid line representing the actual rain forecast\"\"\"\n precip_higher_chance_path = self._dwg.path(fill='#63c8fa', stroke='none', opacity=.3)\n\n list_higher_points[-1] = tuple(list(list_higher_points[-1]) + ['last'])\n\n self.set_curved_path(precip_higher_chance_path, list_higher_points + list_lower_points)\n self._dwg.add(precip_higher_chance_path)\n\n @staticmethod\n def set_curved_path(path, points):\n \"\"\"Pushes points on the path by creating a nice curve between them\"\"\"\n if len(points) < 2:\n return\n\n path.push('M', *points[0])\n\n for i in range(1, len(points)):\n x_mid = (points[i - 1][0] + points[i][0]) / 2\n y_mid = (points[i - 1][1] + points[i][1]) / 2\n\n path.push('Q', points[i - 1][0], points[i - 1][1], x_mid, y_mid)\n if points[i][-1] == 'last' or points[i - 1][-1] == 'last':\n path.push('Q', points[i][0], points[i][1], points[i][0], points[i][1])\n\n path.push('Q', points[-1][0], points[-1][1], points[-1][0], 
points[-1][1])\n\n def draw_data_line(self):\n \"\"\"Draw the main data line for the rain forecast\"\"\"\n rain_list: List[AnimationFrameData] = self._animation_data['sequence']\n graph_rect_left = self._offset\n graph_rect_top = self._top_text_space\n\n entry_list = []\n\n for i in range(len(rain_list)):\n position = rain_list[i]['position']\n entry_list.append(\n (graph_rect_left,\n graph_rect_top + (1.0 - position) * self._graph_height))\n graph_rect_left += self._interval_width\n data_line_path = self._dwg.path(fill='none', stroke='#63c8fa', stroke_width=2)\n self.set_curved_path(data_line_path, entry_list)\n self._dwg.add(data_line_path)\n\n def draw_hour_bars(self):\n \"\"\"Draw the small bars at the bottom to represent the time\"\"\"\n hour_bar_height = 8\n horizontal_inset = self._offset\n\n for (i, rain_item) in enumerate(self._animation_data['sequence']):\n time_image = rain_item['time'].astimezone(tz=self._tz)\n is_hour_bar = time_image.minute == 0\n\n x_position = horizontal_inset\n if i == self._animation_data['most_recent_image_idx']:\n self._dwg.add(self._dwg.line(start=(x_position, self._top_text_space),\n end=(x_position, self._graph_bottom),\n stroke='white',\n opacity=0.5,\n stroke_dasharray=4))\n\n self._dwg.add(self._dwg.line(start=(x_position, self._graph_bottom - hour_bar_height),\n end=(x_position, self._graph_bottom),\n stroke='white' if is_hour_bar else 'lightgrey',\n opacity=0.9 if is_hour_bar else 0.7))\n\n if is_hour_bar:\n graph_rect_center_x = x_position\n graph_rect_center_y = self._graph_bottom + 18\n\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n paragraph.add(self._dwg.text(f\"{time_image.hour}h\", insert=(graph_rect_center_x, graph_rect_center_y),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n horizontal_inset += self._interval_width\n\n self._dwg.add(self._dwg.line(start=(self._offset, self._graph_bottom),\n end=(self._graph_width + self._interval_width / 2, self._graph_bottom),\n stroke='white'))\n\n def draw_current_fame_line(self, idx: int | None = None):\n \"\"\"Draw a solid white line on the timeline at the position of the given frame index\"\"\"\n x_position = self._offset if idx is None else self._offset + idx * self._interval_width\n now = self._dwg.add(self._dwg.line(start=(x_position, self._top_text_space),\n end=(x_position, self._graph_bottom),\n id='now',\n stroke='white',\n opacity=1,\n stroke_width=2))\n if idx is not None:\n return\n now.add(self._dwg.animateTransform(\"translate\", \"transform\",\n id=\"now\",\n from_=f\"{self._offset} 0\",\n to=f\"{self._graph_width - self._offset} 0\",\n dur=f\"{self._frame_count * 0.3}s\",\n repeatCount=\"indefinite\"))\n\n def get_svg_string(self, still_image: bool = False) -> bytes:\n return self._dwg_still.tostring().encode() if still_image else self._dwg_animated.tostring().encode()\n\n def insert_background(self):\n with open(self._background_image_path, 'rb') as f:\n png_data = base64.b64encode(f.read()).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n def insert_cloud_layer(self, idx: int | None = None):\n imgs = [e['image'] for e in self._animation_data['sequence']]\n\n if idx is not None:\n img = imgs[idx]\n png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n return\n\n for i, img in enumerate(imgs):\n 
png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n values = ['hidden'] * self._frame_count\n values[i] = 'visible'\n\n image.add(Animate(\n attributeName=\"visibility\",\n values=\";\".join(values),\n dur=f\"{self._frame_count * 0.3}s\",\n begin=\"0s\",\n repeatCount=\"indefinite\"\n ))\n\n def draw_location(self):\n img = self._animation_data['location']\n png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n def get_dwg(self):\n return copy.deepcopy(self._dwg)"
},
{
"identifier": "disable_from_config",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def disable_from_config(hass: HomeAssistant, config_entry: ConfigEntry):\n modify_from_config(hass, config_entry.entry_id, False)"
},
{
"identifier": "get_config_value",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def get_config_value(config_entry: ConfigEntry, key: str) -> Any:\n if config_entry.options and key in config_entry.options:\n return config_entry.options[key]\n return config_entry.data[key]"
}
] | import asyncio
import logging
import async_timeout
import pytz
from datetime import datetime, timedelta
from typing import Any, List, Tuple
from homeassistant.components.weather import Forecast
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_ZONE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import issue_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (DataUpdateCoordinator,
UpdateFailed)
from .api import IrmKmiApiClient, IrmKmiApiError
from .const import CONF_DARK_MODE, CONF_STYLE, DOMAIN
from .const import IRM_KMI_TO_HA_CONDITION_MAP as CDT_MAP
from .const import LANGS
from .const import MAP_WARNING_ID_TO_SLUG as SLUG_MAP
from .const import OPTION_STYLE_SATELLITE, OUT_OF_BENELUX, STYLE_TO_PARAM_MAP
from .data import (AnimationFrameData, CurrentWeatherData, IrmKmiForecast,
ProcessedCoordinatorData, RadarAnimationData, WarningData)
from .rain_graph import RainGraph
from .utils import disable_from_config, get_config_value | 9,065 | wind_gust_speed = float(now_hourly.get('windPeakSpeedKm', None)) if now_hourly is not None else None
except TypeError:
wind_gust_speed = None
try:
temperature = float(api_data.get('obs', {}).get('temp'))
except TypeError:
temperature = None
current_weather = CurrentWeatherData(
condition=CDT_MAP.get((api_data.get('obs', {}).get('ww'), api_data.get('obs', {}).get('dayNight')), None),
temperature=temperature,
wind_speed=wind_speed,
wind_gust_speed=wind_gust_speed,
wind_bearing=now_hourly.get('windDirectionText', {}).get('en') if now_hourly is not None else None,
pressure=pressure,
uv_index=uv_index
)
if api_data.get('country', '') == 'NL':
current_weather['wind_speed'] = api_data.get('obs', {}).get('windSpeedKm')
current_weather['wind_bearing'] = api_data.get('obs', {}).get('windDirectionText', {}).get('en')
return current_weather
@staticmethod
def hourly_list_to_forecast(data: List[dict] | None) -> List[Forecast] | None:
"""Parse data from the API to create a list of hourly forecasts"""
if data is None or not isinstance(data, list) or len(data) == 0:
return None
forecasts = list()
day = datetime.now()
for f in data:
if 'dateShow' in f:
day = day + timedelta(days=1)
hour = f.get('hour', None)
if hour is None:
continue
precipitation_probability = None
if f.get('precipChance', None) is not None:
precipitation_probability = int(f.get('precipChance'))
ww = None
if f.get('ww', None) is not None:
ww = int(f.get('ww'))
forecast = Forecast(
datetime=day.strftime(f'%Y-%m-%dT{hour}:00:00'),
condition=CDT_MAP.get((ww, f.get('dayNight', None)), None),
native_precipitation=f.get('precipQuantity', None),
native_temperature=f.get('temp', None),
native_templow=None,
native_wind_gust_speed=f.get('windPeakSpeedKm', None),
native_wind_speed=f.get('windSpeedKm', None),
precipitation_probability=precipitation_probability,
wind_bearing=f.get('windDirectionText', {}).get('en'),
native_pressure=f.get('pressure', None),
is_daytime=f.get('dayNight', None) == 'd'
)
forecasts.append(forecast)
return forecasts
@staticmethod
def daily_list_to_forecast(data: List[dict] | None) -> List[Forecast] | None:
"""Parse data from the API to create a list of daily forecasts"""
if data is None or not isinstance(data, list) or len(data) == 0:
return None
forecasts = list()
n_days = 0
for (idx, f) in enumerate(data):
precipitation = None
if f.get('precipQuantity', None) is not None:
try:
precipitation = float(f.get('precipQuantity'))
except TypeError:
pass
native_wind_gust_speed = None
if f.get('wind', {}).get('peakSpeed') is not None:
try:
native_wind_gust_speed = int(f.get('wind', {}).get('peakSpeed'))
except TypeError:
pass
is_daytime = f.get('dayNight', None) == 'd'
forecast = IrmKmiForecast(
datetime=(datetime.now() + timedelta(days=n_days)).strftime('%Y-%m-%d')
if is_daytime else datetime.now().strftime('%Y-%m-%d'),
condition=CDT_MAP.get((f.get('ww1', None), f.get('dayNight', None)), None),
native_precipitation=precipitation,
native_temperature=f.get('tempMax', None),
native_templow=f.get('tempMin', None),
native_wind_gust_speed=native_wind_gust_speed,
native_wind_speed=f.get('wind', {}).get('speed'),
precipitation_probability=f.get('precipChance', None),
wind_bearing=f.get('wind', {}).get('dirText', {}).get('en'),
is_daytime=is_daytime,
text_fr=f.get('text', {}).get('fr'),
text_nl=f.get('text', {}).get('nl')
)
forecasts.append(forecast)
if is_daytime or idx == 0:
n_days += 1
return forecasts
def create_rain_graph(self,
radar_animation: RadarAnimationData,
api_animation_data: List[dict],
country: str,
images_from_api: Tuple[bytes],
| """DataUpdateCoordinator for the IRM KMI integration."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiCoordinator(DataUpdateCoordinator):
"""Coordinator to update data from IRM KMI"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize the coordinator."""
super().__init__(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="IRM KMI weather",
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(minutes=7),
)
self._api_client = IrmKmiApiClient(session=async_get_clientsession(hass))
self._zone = get_config_value(entry, CONF_ZONE)
self._dark_mode = get_config_value(entry, CONF_DARK_MODE)
self._style = get_config_value(entry, CONF_STYLE)
self._config_entry = entry
async def _async_update_data(self) -> ProcessedCoordinatorData:
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
if (zone := self.hass.states.get(self._zone)) is None:
raise UpdateFailed(f"Zone '{self._zone}' not found")
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
api_data = await self._api_client.get_forecasts_coord(
{'lat': zone.attributes[ATTR_LATITUDE],
'long': zone.attributes[ATTR_LONGITUDE]}
)
_LOGGER.debug(f"Observation for {api_data.get('cityName', '')}: {api_data.get('obs', '{}')}")
except IrmKmiApiError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
if api_data.get('cityName', None) in OUT_OF_BENELUX:
# TODO create a repair when this triggers
_LOGGER.info(f"Config state: {self._config_entry.state}")
_LOGGER.error(f"The zone {self._zone} is now out of Benelux and forecast is only available in Benelux."
f"Associated device is now disabled. Move the zone back in Benelux and re-enable to fix "
f"this")
disable_from_config(self.hass, self._config_entry)
issue_registry.async_create_issue(
self.hass,
DOMAIN,
"zone_moved",
is_fixable=True,
severity=issue_registry.IssueSeverity.ERROR,
translation_key='zone_moved',
data={'config_entry_id': self._config_entry.entry_id, 'zone': self._zone},
translation_placeholders={'zone': self._zone}
)
return ProcessedCoordinatorData()
return await self.process_api_data(api_data)
async def async_refresh(self) -> None:
"""Refresh data and log errors."""
await self._async_refresh(log_failures=True, raise_on_entry_error=True)
async def _async_animation_data(self, api_data: dict) -> RadarAnimationData:
"""From the API data passed in, call the API to get all the images and create the radar animation data object.
Frames from the API are merged with the background map and the location marker to create each frame."""
animation_data = api_data.get('animation', {}).get('sequence')
localisation_layer_url = api_data.get('animation', {}).get('localisationLayer')
country = api_data.get('country', '')
if animation_data is None or localisation_layer_url is None or not isinstance(animation_data, list):
return RadarAnimationData()
try:
images_from_api = await self.download_images_from_api(animation_data, country, localisation_layer_url)
except IrmKmiApiError:
_LOGGER.warning(f"Could not get images for weather radar")
return RadarAnimationData()
localisation = images_from_api[0]
images_from_api = images_from_api[1:]
lang = self.hass.config.language if self.hass.config.language in LANGS else 'en'
radar_animation = RadarAnimationData(
hint=api_data.get('animation', {}).get('sequenceHint', {}).get(lang),
unit=api_data.get('animation', {}).get('unit', {}).get(lang),
location=localisation
)
rain_graph = self.create_rain_graph(radar_animation, animation_data, country, images_from_api)
radar_animation['svg_animated'] = rain_graph.get_svg_string()
radar_animation['svg_still'] = rain_graph.get_svg_string(still_image=True)
return radar_animation
async def process_api_data(self, api_data: dict) -> ProcessedCoordinatorData:
"""From the API data, create the object that will be used in the entities"""
return ProcessedCoordinatorData(
current_weather=IrmKmiCoordinator.current_weather_from_data(api_data),
daily_forecast=IrmKmiCoordinator.daily_list_to_forecast(api_data.get('for', {}).get('daily')),
hourly_forecast=IrmKmiCoordinator.hourly_list_to_forecast(api_data.get('for', {}).get('hourly')),
animation=await self._async_animation_data(api_data=api_data),
warnings=self.warnings_from_data(api_data.get('for', {}).get('warning'))
)
async def download_images_from_api(self,
animation_data: list,
country: str,
localisation_layer_url: str) -> tuple[Any, ...]:
"""Download a batch of images to create the radar frames."""
coroutines = list()
coroutines.append(
self._api_client.get_image(localisation_layer_url,
params={'th': 'd' if country == 'NL' or not self._dark_mode else 'n'}))
for frame in animation_data:
if frame.get('uri', None) is not None:
coroutines.append(
self._api_client.get_image(frame.get('uri'), params={'rs': STYLE_TO_PARAM_MAP[self._style]}))
async with async_timeout.timeout(20):
images_from_api = await asyncio.gather(*coroutines)
_LOGGER.debug(f"Just downloaded {len(images_from_api)} images")
return images_from_api
@staticmethod
def current_weather_from_data(api_data: dict) -> CurrentWeatherData:
"""Parse the API data to build a CurrentWeatherData."""
# Process data to get current hour forecast
now_hourly = None
hourly_forecast_data = api_data.get('for', {}).get('hourly')
if not (hourly_forecast_data is None
or not isinstance(hourly_forecast_data, list)
or len(hourly_forecast_data) == 0):
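# The entry matching the current hour is expected among the first two hourly records,
# so only those are scanned.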
for current in hourly_forecast_data[:2]:
if datetime.now().strftime('%H') == current['hour']:
now_hourly = current
break
# Get UV index
module_data = api_data.get('module', None)
uv_index = None
if not (module_data is None or not isinstance(module_data, list)):
for module in module_data:
if module.get('type', None) == 'uv':
uv_index = module.get('data', {}).get('levelValue')
try:
pressure = float(now_hourly.get('pressure', None)) if now_hourly is not None else None
except TypeError:
pressure = None
try:
wind_speed = float(now_hourly.get('windSpeedKm', None)) if now_hourly is not None else None
except TypeError:
wind_speed = None
try:
wind_gust_speed = float(now_hourly.get('windPeakSpeedKm', None)) if now_hourly is not None else None
except TypeError:
wind_gust_speed = None
try:
temperature = float(api_data.get('obs', {}).get('temp'))
except TypeError:
temperature = None
current_weather = CurrentWeatherData(
condition=CDT_MAP.get((api_data.get('obs', {}).get('ww'), api_data.get('obs', {}).get('dayNight')), None),
temperature=temperature,
wind_speed=wind_speed,
wind_gust_speed=wind_gust_speed,
wind_bearing=now_hourly.get('windDirectionText', {}).get('en') if now_hourly is not None else None,
pressure=pressure,
uv_index=uv_index
)
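# For Dutch locations, wind speed and bearing come from the live observation ('obs')
# instead of the hourly forecast values set above.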
if api_data.get('country', '') == 'NL':
current_weather['wind_speed'] = api_data.get('obs', {}).get('windSpeedKm')
current_weather['wind_bearing'] = api_data.get('obs', {}).get('windDirectionText', {}).get('en')
return current_weather
@staticmethod
def hourly_list_to_forecast(data: List[dict] | None) -> List[Forecast] | None:
"""Parse data from the API to create a list of hourly forecasts"""
if data is None or not isinstance(data, list) or len(data) == 0:
return None
forecasts = list()
day = datetime.now()
for f in data:
if 'dateShow' in f:
day = day + timedelta(days=1)
hour = f.get('hour', None)
if hour is None:
continue
precipitation_probability = None
if f.get('precipChance', None) is not None:
precipitation_probability = int(f.get('precipChance'))
ww = None
if f.get('ww', None) is not None:
ww = int(f.get('ww'))
forecast = Forecast(
datetime=day.strftime(f'%Y-%m-%dT{hour}:00:00'),
condition=CDT_MAP.get((ww, f.get('dayNight', None)), None),
native_precipitation=f.get('precipQuantity', None),
native_temperature=f.get('temp', None),
native_templow=None,
native_wind_gust_speed=f.get('windPeakSpeedKm', None),
native_wind_speed=f.get('windSpeedKm', None),
precipitation_probability=precipitation_probability,
wind_bearing=f.get('windDirectionText', {}).get('en'),
native_pressure=f.get('pressure', None),
is_daytime=f.get('dayNight', None) == 'd'
)
forecasts.append(forecast)
return forecasts
@staticmethod
def daily_list_to_forecast(data: List[dict] | None) -> List[Forecast] | None:
"""Parse data from the API to create a list of daily forecasts"""
if data is None or not isinstance(data, list) or len(data) == 0:
return None
forecasts = list()
n_days = 0
for (idx, f) in enumerate(data):
precipitation = None
if f.get('precipQuantity', None) is not None:
try:
precipitation = float(f.get('precipQuantity'))
except TypeError:
pass
native_wind_gust_speed = None
if f.get('wind', {}).get('peakSpeed') is not None:
try:
native_wind_gust_speed = int(f.get('wind', {}).get('peakSpeed'))
except TypeError:
pass
is_daytime = f.get('dayNight', None) == 'd'
forecast = IrmKmiForecast(
datetime=(datetime.now() + timedelta(days=n_days)).strftime('%Y-%m-%d')
if is_daytime else datetime.now().strftime('%Y-%m-%d'),
condition=CDT_MAP.get((f.get('ww1', None), f.get('dayNight', None)), None),
native_precipitation=precipitation,
native_temperature=f.get('tempMax', None),
native_templow=f.get('tempMin', None),
native_wind_gust_speed=native_wind_gust_speed,
native_wind_speed=f.get('wind', {}).get('speed'),
precipitation_probability=f.get('precipChance', None),
wind_bearing=f.get('wind', {}).get('dirText', {}).get('en'),
is_daytime=is_daytime,
text_fr=f.get('text', {}).get('fr'),
text_nl=f.get('text', {}).get('nl')
)
forecasts.append(forecast)
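# Daytime entries (and the very first entry, which is usually tonight's forecast) advance
# the day counter used to date the following entries; night entries keep today's date.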
if is_daytime or idx == 0:
n_days += 1
return forecasts
def create_rain_graph(self,
radar_animation: RadarAnimationData,
api_animation_data: List[dict],
country: str,
images_from_api: Tuple[bytes], | ) -> RainGraph: | 17 | 2023-12-17 16:35:01+00:00 | 12k |
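Editor's note: the date bookkeeping in daily_list_to_forecast above is easy to misread, so here is a minimal, self-contained sketch (not taken from the repository) that reproduces only the n_days / is_daytime loop; the 'd'/'n' flags below are illustrative sample data, not real API output.

from datetime import datetime, timedelta

def assign_dates(day_night_flags):
    # Mirrors daily_list_to_forecast: daytime entries are dated today + n_days,
    # night entries always get today's date, and n_days advances after daytime
    # entries and after the very first entry (usually tonight's forecast).
    n_days = 0
    dates = []
    for idx, flag in enumerate(day_night_flags):
        is_daytime = flag == 'd'
        if is_daytime:
            dates.append((datetime.now() + timedelta(days=n_days)).strftime('%Y-%m-%d'))
        else:
            dates.append(datetime.now().strftime('%Y-%m-%d'))
        if is_daytime or idx == 0:
            n_days += 1
    return dates

# Example: tonight, then two day/night pairs
# -> [today, today+1, today, today+2, today]
print(assign_dates(['n', 'd', 'n', 'd', 'n']))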
v3ucn/Bert-vits2-V2.2 | oldVersion/V101/text/chinese.py | [
{
"identifier": "punctuation",
"path": "oldVersion/V101/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "oldVersion/V101/text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n \"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n 
\"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 
一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words shold be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals"
}
] | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,596 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
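# opencpop-strict.txt is read as a two-column, tab-separated table: the first field
# (a pinyin syllable) is mapped to the second field (its symbol string).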
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
| tone_modifier = ToneSandhi() | 1 | 2023-12-18 04:54:46+00:00 | 12k |
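Editor's note: the recorded next line of chinese.py is `tone_modifier = ToneSandhi()`; the snippet below is an illustrative sketch (not quoted from the repository) of how a rep_map like the one above is typically applied to normalize punctuation before pinyin conversion. The helper name `replace_punctuation` and the exact regex are assumptions.

import re

def replace_punctuation(text: str, rep_map: dict) -> str:
    # Build one alternation over all mapped symbols and substitute them in a single pass.
    pattern = re.compile("|".join(re.escape(symbol) for symbol in rep_map))
    return pattern.sub(lambda match: rep_map[match.group(0)], text)

# e.g. replace_punctuation("你好!《世界》", rep_map) -> "你好!'世界'"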
d-krupke/CP-SAT-Log-Analyzer | app.py | [
{
"identifier": "LogParser",
"path": "cpsat_log_parser/parser.py",
"snippet": "class LogParser:\n def __init__(self, log: typing.Union[str, typing.List[str]]) -> None:\n self.comments, log_without_comments = self._extract_comments(log)\n self.blocks = self.parse_blocks(log_without_comments)\n\n def parse_blocks(\n self, log: typing.Union[str, typing.List[str]]\n ) -> typing.List[LogBlock]:\n \"\"\"\n Parse a log into its blocks.\n \"\"\"\n blocks = []\n sub_parser = ALL_BLOCKS\n for data in _split_log(log):\n for parser in sub_parser:\n if parser.matches(data):\n blocks.append(parser(data))\n break\n else:\n raise ValueError(f\"Could not parse data: {data}\")\n return blocks\n\n def _extract_comments(\n self, log: typing.Union[str, typing.List[str]]\n ) -> typing.Tuple[typing.List[str], typing.List[str]]:\n \"\"\"\n Extract the comments from a log.\n \"\"\"\n if isinstance(log, str):\n log = log.split(\"\\n\")\n if not isinstance(log, list):\n raise TypeError(\"log must be a list or a string\")\n comments = []\n data = []\n for line in log:\n if line.startswith(\"//\"):\n comments.append(line[2:].strip())\n else:\n data.append(line)\n return comments, data\n\n def get_block_of_type(self, block_type: typing.Type[LogBlock]) -> LogBlock:\n for block in self.blocks:\n if isinstance(block, block_type):\n return block\n raise KeyError(f\"Could not find block '{block_type.__name__}'\")"
},
{
"identifier": "SearchProgressBlock",
"path": "cpsat_log_parser/blocks/search_progress.py",
"snippet": "class SearchProgressBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n lines = [line.strip() for line in lines if line.strip()]\n if not lines:\n raise ValueError(\"No lines to parse\")\n if not self.matches(lines):\n raise ValueError(\"Lines do not match SearchProgressBlock\")\n self.lines = lines\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().lower().startswith(\"Starting search\".lower())\n\n def _parse_events(\n self,\n ) -> typing.List[typing.Union[BoundEvent, ObjEvent, ModelEvent]]:\n \"\"\"\n Parse the log file into a list of BoundEvent and ObjEvent.\n \"\"\"\n events = []\n for line in self.lines:\n obj_event = ObjEvent.parse(line)\n if obj_event:\n events.append(obj_event)\n continue\n bound_event = BoundEvent.parse(line)\n if bound_event:\n events.append(bound_event)\n continue\n model_event = ModelEvent.parse(line)\n if model_event:\n events.append(model_event)\n continue\n return events\n\n def get_presolve_time(self) -> float:\n # first line looks like this \"Starting search at 16.74s with 24 workers.\"\n m = re.match(\n r\"Starting [Ss]earch at (?P<time>\\d+\\.\\d+s) with \\d+ workers.\",\n self.lines[0],\n )\n if m:\n return parse_time(m.group(\"time\"))\n raise ValueError(f\"Could not parse presolve time from '{self.lines[0]}'\")\n\n def get_title(self) -> str:\n return \"Search progress:\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\nThe search progress log is an essential element of the overall log, crucial for identifying performance bottlenecks. It clearly demonstrates the solver's progression over time and pinpoints where it faces significant challenges. It is important to discern whether the upper or lower bounds are causing issues, or if the solver initially finds a near-optimal solution but struggles to minimize a small remaining gap.\n\nThe structure of the log entries is standardized as follows:\n\n`EVENT NAME\\t|\\tTIME\\t|\\tBEST SOLUTION\\t|\\tRANGE OF THE SEARCH\\t|\\tCOMMENT`\n\nFor instance, an event marked `#2` indicates the discovery of the second solution. Here, you will observe an improvement in the `BEST SOLUTION` metric. A notation like `best:16` confirms that the solver has found a solution with a value of 16.\n\nAn event with `#Bound` denotes an enhancement in the bound, as seen by a reduction in the `RANGE OF THE SEARCH`. A detail such as `next:[7,14]` signifies that the solver is now focused on finding a solution valued between 7 and 14.\n\nThe `COMMENT` section provides essential information about the strategies that led to these improvements.\n\nEvents labeled `#Model` signal modifications to the model, such as fixing certain variables.\n\nTo fully grasp the nuances, zooming into the plot is necessary, especially since the initial values can be quite large. 
A thorough examination of which sections of the process converge quickest is crucial for a comprehensive understanding.\n \"\"\"\n\n def gap_as_plotly(self) -> typing.Optional[go.Figure]:\n gap_events = [\n e\n for e in self._parse_events()\n if isinstance(e, BoundEvent) or isinstance(e, ObjEvent)\n ]\n\n def is_valid_gap(gap):\n if gap is None:\n return False\n if not math.isfinite(gap):\n return False\n return True\n\n gaps = [(e.time, e.get_gap()) for e in gap_events if is_valid_gap(e.get_gap())]\n fig = go.Figure()\n if not gap_events:\n return None\n # add gaps\n fig.add_trace(\n go.Scatter(\n x=[t for t, _ in gaps],\n y=[gap for _, gap in gaps],\n mode=\"lines+markers\",\n line=dict(color=\"purple\"),\n name=\"Gap\",\n hovertext=[e.msg for e in gap_events],\n )\n )\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * gaps[-1][0]])\n max_gap = max(gap for _, gap in gaps if gap is not None)\n # make the y-axis start at 0\n fig.update_yaxes(range=[-1, min(300, 1.01 * max_gap)])\n fig.update_layout(\n title=\"Optimality Gap\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Gap (%)\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig\n\n def model_changes_as_plotly(self) -> typing.Optional[go.Figure]:\n \"\"\"\n Plot the model changes in percent over time.\n \"\"\"\n model_events = [e for e in self._parse_events() if isinstance(e, ModelEvent)]\n fig = go.Figure()\n if not model_events:\n return None\n # add number of vars\n fig.add_trace(\n go.Scatter(\n x=[e.time for e in model_events],\n y=[100 * (e.vars_remaining / e.vars) for e in model_events],\n mode=\"lines+markers\",\n line=dict(color=\"green\"),\n name=\"Variables\",\n hovertext=[e.msg for e in model_events],\n )\n )\n # add number of constraints\n fig.add_trace(\n go.Scatter(\n x=[e.time for e in model_events],\n y=[100 * (e.constr_remaining / e.constr) for e in model_events],\n mode=\"lines+markers\",\n line=dict(color=\"orange\"),\n name=\"Constraints\",\n hovertext=[e.msg for e in model_events],\n )\n )\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * model_events[-1].time])\n # make the y-axis range from 0 to 100\n fig.update_yaxes(range=[0, 101])\n fig.update_layout(\n title=\"Model changes\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Remaining (%)\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig\n\n def as_plotly(self) -> typing.Optional[go.Figure]:\n \"\"\"\n Plot the progress of the solver.\n \"\"\"\n events = self._parse_events()\n obj_events = [e for e in events if isinstance(e, ObjEvent)]\n bound_events = [e for e in events if isinstance(e, BoundEvent)]\n fig = go.Figure()\n if not obj_events and not bound_events:\n return None\n max_time = max([e.time for e in bound_events + obj_events])\n\n # make sure that both bounds and objs have a value at max_time\n if obj_events and obj_events[-1].time < max_time:\n if bound_events[-1].obj is None:\n # Should nearly never happen\n obj_events.append(\n ObjEvent(\n time=max_time,\n obj=obj_events[-1].obj,\n bound=bound_events[-1].bound,\n msg=\"\",\n )\n )\n else:\n obj_events.append(\n ObjEvent(\n time=max_time,\n obj=bound_events[-1].obj,\n bound=bound_events[-1].bound,\n msg=\"\",\n )\n )\n if bound_events and bound_events[-1].time < max_time:\n bound_events.append(\n BoundEvent(\n time=max_time,\n obj=obj_events[-1].obj,\n bound=obj_events[-1].bound,\n msg=\"\",\n )\n )\n\n # plot the bounds 
over time. Add the comment as hover text\n fig.add_trace(\n go.Scatter(\n x=[b.time for b in bound_events],\n y=[b.bound for b in bound_events],\n mode=\"lines+markers\",\n line=dict(color=\"cyan\"),\n name=\"Bound\",\n hovertext=[b.msg for b in bound_events],\n )\n )\n\n # plot the objective values over time. Add the comment as hover text\n fig.add_trace(\n go.Scatter(\n x=[o.time for o in obj_events],\n y=[o.obj for o in obj_events],\n mode=\"lines+markers\",\n line=dict(color=\"red\"),\n name=\"Objective\",\n hovertext=[o.msg for o in obj_events],\n )\n )\n\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * max_time])\n fig.update_layout(\n title=\"Search progress\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Objective\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig"
},
{
"identifier": "SearchStatsBlock",
"path": "cpsat_log_parser/blocks/search_stats.py",
"snippet": "class SearchStatsBlock(TableBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n if not lines[0].startswith(\"Search stats\"):\n raise ValueError(f\"Not a valid progress log. First line: {lines[0]}\")\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Search stats\")\n\n def get_title(self) -> str:\n return \"Search Strategies: Statistics\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This table gives you some statistics on the different search strategies.\n How many variables where in the search space, how many conflicts were found, how many branches were executed, how often was the search restarted, and how often where the boolean and integer propagators applied.\n \"\"\""
},
{
"identifier": "SolutionsBlock",
"path": "cpsat_log_parser/blocks/solutions.py",
"snippet": "class SolutionsBlock(TableBlock):\n \"\"\"\n\n Not available for older versions of CP-SAT.\n \"\"\"\n\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n if not self.matches(lines):\n raise ValueError(f\"Not a valid progress log. First line: {lines[0]}\")\n\n def get_num_solutions(self) -> int:\n # First line looks like this \"Solutions (11) Num Rank\"\n # We want to get the number in the parentheses\n return int(self.lines[0].split(\"(\")[1].split(\")\")[0])\n\n def get_title(self) -> str:\n return \"Solutions\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n Which strategy found the most solutions?\n The rank indicates how good the found solutions are.\n Ranks with `[1,X]` indicate an optimal solution.\n \"\"\"\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n # \"Solutions (11) Num Rank\"\n match = re.match(r\"Solutions\\s+\\(\\d+\\)\\s+Num\\s+Rank\", lines[0])\n return bool(match)"
},
{
"identifier": "TableBlock",
"path": "cpsat_log_parser/blocks/tables.py",
"snippet": "class TableBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n if not lines:\n raise ValueError(\"No lines to parse\")\n self.lines = lines\n self._df = None\n\n def to_pandas(self) -> pd.DataFrame:\n \"\"\"\n Parse the table into a pandas DataFrame.\n \"\"\"\n log = \"\\n\".join((line.strip() for line in self.lines))\n # Replace the single quotes with nothing\n log = log.replace(\"'\", \"\")\n\n # Replace two or more spaces with a single tab\n log = re.sub(\"\\s\\s+\", \"\\t\", log)\n\n # Use StringIO to convert the string to a file-like object for read_csv\n log_file = StringIO(log)\n\n df = pd.read_csv(log_file, delimiter=\"\\t\", index_col=0)\n return df"
},
{
"identifier": "SolverBlock",
"path": "cpsat_log_parser/blocks/solver.py",
"snippet": "class SolverBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n def _parse_parameters(self, line: str) -> typing.Dict:\n \"\"\"\n\n The parameters line can look like this:\n \"Parameters: log_search_progress: true use_timetabling_in_no_overlap_2d: true use_energetic_reasoning_in_no_overlap_2d: true use_pairwise_reasoning_in_no_overlap_2d: true\"\n \"\"\"\n parameters = {}\n line = line[len(\"Parameters:\") :]\n for match in re.finditer(r\"(?P<key>\\w+): (?P<value>[^ ]+)\", line):\n parameters[match.group(\"key\")] = match.group(\"value\")\n return parameters\n\n def get_title(self) -> str:\n return \"Solver Information\"\n\n def get_help(self) -> str:\n return \"\"\"This block contains basic information about the solver.\n As CP-SAT is still under active development and makes serious improvements with every release, it is important to know which version of the solver was used.\n The number of workers, i.e., the level of parallelism, is also important to know.\n CP-SAT is a portfolio solver and the higher the number of workers, the more strategies are used.\n You can find an overview of the different tiers activated by the number of workers in the [CP-SAT documentation](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers).\n While you should be careful with tinkering with the parameters (they have sensible defaults), it is still good to know which parameters were used.\n All of these information are actually already shown in the overview.\n \"\"\"\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Starting CP-SAT solver\")\n\n def get_parameters(self) -> typing.Dict:\n for line in self.lines:\n if line.startswith(\"Parameters:\"):\n return self._parse_parameters(line)\n raise ValueError(\"No parameters found\")\n\n def get_number_of_workers(self) -> int:\n # the line looks like this: \"Setting number of workers to 24\"\n for line in self.lines:\n if line.startswith(\"Setting number of workers to\"):\n return int(line.strip().split(\" \")[-1])\n # If `num_search_workers` is set, the number of workers is not shown in the log.\n if \"num_search_workers\" in self.get_parameters():\n return int(self.get_parameters()[\"num_search_workers\"])\n raise ValueError(\"No number of workers found\")\n\n def get_version(self) -> str:\n # the line looks like this: \"Starting CP-SAT solver v9.7.2996\"\n for line in self.lines:\n if line.startswith(\"Starting CP-SAT solver\"):\n return line.strip().split(\" \")[-1]\n raise ValueError(\"No version found\")\n\n def get_parsed_version(self) -> typing.Tuple[int, int, int]:\n # the line looks like this: \"Starting CP-SAT solver v9.7.2996\"\n version = self.get_version()[1:]\n major, minor, patch = version.split(\".\")\n return int(major), int(minor), int(patch)"
},
{
"identifier": "ResponseBlock",
"path": "cpsat_log_parser/blocks/solver_response.py",
"snippet": "class ResponseBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].startswith(\"CpSolverResponse\")\n\n def get_title(self) -> str:\n return \"CpSolverResponse\"\n\n def to_dict(self) -> dict:\n d = {}\n for line in self.lines:\n if line.startswith(\"CpSolverResponse\"):\n continue\n key, value = line.split(\":\")\n key = key.strip()\n value = value.strip()\n if key == \"status\":\n value = value.split(\" \")[0]\n d[key] = value\n return d\n\n def get_gap(self):\n vals = self.to_dict()\n try:\n obj = float(vals[\"objective\"])\n bound = float(vals[\"best_bound\"])\n except TypeError:\n return None\n except ValueError:\n return None\n return 100 * (abs(obj - bound) / max(1, abs(obj)))\n\n def to_pandas(self) -> pd.DataFrame:\n return pd.DataFrame([self.to_dict()])\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This final block of the log contains a summary by the solver.\n Here you find the most important information, such as how successful the search was.\n\n You can find the original documentation [here](https://github.com/google/or-tools/blob/8768ed7a43f8899848effb71295a790f3ecbe2f2/ortools/sat/cp_model.proto#L720).\n \"\"\""
},
{
"identifier": "PresolveLogBlock",
"path": "cpsat_log_parser/blocks/presolve_log.py",
"snippet": "class PresolveLogBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Starting presolve at\")\n\n def get_title(self) -> str:\n return \"Presolve Log\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This block contains the presolve log.\n It contains information about the presolve steps and the time they took.\n\n There are multiple rounds of domain reduction, expansion, equivalence\n checking, substitution, and probing performed during presolve.\n These steps can take some time, but they can also significantly reduce\n the model size and the search space and thus the time it takes to find\n a solution. Usually, the summary is sufficient to look at to see what happened.\n\n However, you may still want to scroll over the log for messages like\n `removed duplicate constraint`, indicating redundancies (and possibly bugs)\n in you model building.\n \"\"\""
},
{
"identifier": "PresolvedModelBlock",
"path": "cpsat_log_parser/blocks/presolved_model.py",
"snippet": "class PresolvedModelBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n if re.match(r\"Presolved (satisfaction|optimization) model\", lines[0]):\n return True\n return False\n\n def get_title(self) -> str:\n return \"Presolved Model\"\n\n def get_model_fingerprint(self) -> str:\n return self.lines[0].split(\"model_fingerprint: \")[1].strip(\")\")\n\n def get_num_variables(self) -> int:\n return int(\n self.lines[1]\n .split(\"#Variables: \")[1]\n .strip()\n .split(\" \")[0]\n .replace(\"'\", \"\")\n )\n\n def get_num_constraints(self) -> int:\n n = 0\n for line in self.lines:\n if line.startswith(\"#k\"):\n # \"#kNoOverlap2D: 1 (#rectangles: 24)\"\n # \"#kInterval: 48\"\n n += int(line.split(\":\")[1].strip().split(\" \")[0].replace(\"'\", \"\"))\n return n\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This is the most important block of the presolve phase and gives an overview of the model after presolve.\n It contains the number of variables and constraints, as well as coefficients and domains.\n\n `- 200 in [0,199]` will indicate that there are 200 variables with domain `[0,199]`, i.e., values between 0 and 199.\n\n `#kLinearN: 3'000 (#terms: 980'948)` indicates that there are 3000 linear constraints with 980'948 coefficients.\n\n It is useful to compare this to the initial model, to see if your\n model was simplified by presolve, which indicates that you can\n simplify your model yourself, saving presolve time. If you notice that a\n lot of time is spent in presolve but it does not simplify your model,\n you can try to disable/reduce presolve.\n\n It is also interesting to see if the presolve replaced some of your\n constraints with more efficient ones.\n \"\"\""
},
{
"identifier": "TaskTimingBlock",
"path": "cpsat_log_parser/blocks/task_timing.py",
"snippet": "class TaskTimingBlock(LogBlock):\n def __init__(self, lines: List[str]) -> None:\n super().__init__(lines)\n if not self.matches(lines):\n raise ValueError(\"Invalid lines for TaskTimingBlock\")\n\n @staticmethod\n def matches(lines: List[str]) -> bool:\n if not lines:\n return False\n return lines[0].startswith(\"Task timing\")\n\n def get_help(self) -> typing.Optional[str]:\n return \"The time spent on each subsolver. Does not give much useful information for the common user.\"\n\n def get_title(self) -> str:\n return \"Task Timing\"\n\n def to_pandas(self, deterministic: bool) -> pd.DataFrame:\n lines = [line.strip() for line in self.lines if line.strip()]\n lines = [line.replace(\"'\", \"\") for line in lines]\n lines = [line.replace(\"[\", \" \") for line in lines]\n lines = [line.replace(\"]\", \" \") for line in lines]\n lines = [line.replace(\",\", \" \") for line in lines]\n lines = [line.replace(\"\\t\", \" \") for line in lines]\n lines = [line.replace(\"s \", \"s \") for line in lines]\n lines = [re.sub(\"\\s\\s+\", \"\\t\", line) for line in lines]\n\n def filter(line):\n split_line = line.split(\"\\t\")\n n = len(split_line)\n if deterministic:\n return \"\\t\".join(split_line[:1] + split_line[n // 2 + 1 :])\n else:\n return \"\\t\".join(split_line[: n // 2 + 1])\n\n lines = [filter(line) for line in lines]\n if deterministic:\n lines[0] = lines[0].replace(\"Task timing\", \"Task timing (deterministic)\")\n\n # Replace two or more spaces with a single tab\n log = \"\\n\".join(lines)\n log = re.sub(\"\\s\\s+\", \"\\t\", log)\n\n # Use StringIO to convert the string to a file-like object for read_csv\n log_file = StringIO(log)\n\n df = pd.read_csv(log_file, delimiter=\"\\t\", index_col=0)\n return df"
},
{
"identifier": "input_log",
"path": "_app/input_log.py",
"snippet": "def input_log():\n # accept log via file upload or text input\n data = None\n log_file = st.file_uploader(\"Upload a log file\", type=\"txt\")\n if log_file is not None:\n data = log_file.read().decode(\"utf-8\")\n else:\n log_text = st.text_area(\"Or paste a log here\")\n if log_text:\n data = log_text\n url = st.text_input(\"Or load a log from a URL:\", value=\"\")\n if url:\n data = get_data_from_url(url)\n # example logs per button\n st.markdown(\"Or use one of the following example logs:\")\n examples = [\n {\n \"file\": \"example_logs/98_02.txt\",\n \"origin\": \"This log originates from a TSP with MTZ constraints. It is not solved to optimality.\",\n },\n {\n \"file\": \"example_logs/98_03.txt\",\n \"origin\": \"This log originates from a TSP with AddCircuit constraint. It only has a single, but expensive, constraint.\",\n },\n {\n \"file\": \"example_logs/98_04.txt\",\n \"origin\": \"This log originates from a Multi-Knapsack problem.\",\n },\n {\n \"file\": \"example_logs/98_05.txt\",\n \"origin\": \"This log originates from a Packing problem.\",\n },\n {\n \"file\": \"example_logs/98_06.txt\",\n \"origin\": \"This log originates from a Packing problem.\",\n },\n {\n \"file\": \"example_logs/98_07.txt\",\n \"origin\": \"This log originates from a Knapsack problem run on an old Macbook. It spends most of the time in presolve.\",\n },\n {\n \"file\": \"example_logs/98_08.txt\",\n \"origin\": \"An example from an iteration of SampLNS\",\n },\n {\n \"file\": \"example_logs/97_01.txt\",\n \"origin\": \"This was an example log flying around on my computer for teaching purposes.\",\n },\n ]\n # at most 5 examples per row\n row_length = 4\n for i in range(0, len(examples), row_length):\n cols = st.columns(min(len(examples) - i, row_length))\n for j, example in enumerate(examples[i : i + row_length]):\n if cols[j].button(f\"Example {i+j+1}\", help=example.get(\"origin\", None)):\n with open(example[\"file\"]) as f:\n data = f.read()\n\n if not data and \"from_url\" in st.query_params:\n url = st.query_params.get_all(\"from_url\")[0]\n data = get_data_from_url(url)\n if not data and \"example\" in st.query_params:\n example = st.query_params.get_all(\"example\")[0]\n import urllib.request\n import urllib.parse\n\n url = \"https://cpsat-log-analyzer.streamlit.app/?\" + urllib.parse.urlencode(\n {\"example\": example}\n )\n st.info(\n f\"Loading example log `{example}`. You can share it with others using [{url}]({url}).\"\n )\n if \"/\" in example:\n st.error(f\"Invalid example log `{example}`.\")\n return None\n example_path = f\"example_logs/{example}.txt\"\n if not os.path.dirname(example_path).endswith(\"example_logs\"):\n st.error(f\"Invalid example log `{example}`.\")\n return None\n if not os.path.exists(example_path):\n st.error(f\"Example log `{example}` does not exist.\")\n return None\n with open(f\"example_logs/{example}.txt\") as f:\n data = f.read()\n return data"
},
{
"identifier": "print_header",
"path": "_app/header.py",
"snippet": "def print_header():\n st.title(\"CP-SAT Log Analyzer\")\n st.markdown(\n \"Dive into the world of constraint programming with ease using our CP-SAT Log Analyzer. This tool transforms the dense and detailed logs of CP-SAT into clear, readable formats, complemented by intuitive visualizations of key metrics. Whether you're tuning your model or exploring data, our analyzer simplifies and enlightens your journey with CP-SAT. Let us make complex logs simple and actionable!\"\n )\n\n st.markdown(\n \"[](https://github.com/d-krupke/CP-SAT-Log-Analyzer) Feel free to open issues or contribute.\"\n )\n st.markdown(\n \"[](https://github.com/d-krupke/cpsat-primer) This project is a sibling of the CP-SAT Primer.\"\n )\n\n st.header(\"Log File\")\n st.markdown(\n \"\"\"\n To begin analyzing with CP-SAT Log Analyzer, please upload your log file. If you haven't already, you can generate a log file by enabling the log output. Simply set the `log_search_progress` parameter to `True` in your CP-SAT solver configuration. Once this is done, you'll have a detailed log ready for upload and analysis.\n\n The log usually starts as follows:\n ```\n Starting CP-SAT solver v9.7.2996\n Parameters: log_search_progress: true\n Setting number of workers to 24\n\n ...\n ```\n\n Only complete and properly formatted logs are supported for now.\n \"\"\"\n )"
},
{
"identifier": "show_overview",
"path": "_app/overview.py",
"snippet": "def show_overview(parser):\n st.subheader(\"Overview\", divider=True)\n if parser.comments:\n with st.chat_message(\"user\"):\n # escape markdown to prevent XSS\n comment = \"\\n\".join(parser.comments)\n comment = comment.replace(\"\\\\\", \"\")\n comment = comment.replace(\"[\", \"\\\\[*\")\n comment = comment.replace(\"]\", \"*\\\\]\")\n st.write(comment)\n try:\n solver_block = parser.get_block_of_type(SolverBlock)\n initial_model_block = parser.get_block_of_type(InitialModelBlock)\n search_progress_block = parser.get_block_of_type(SearchProgressBlock)\n response_block = parser.get_block_of_type(ResponseBlock)\n col1, col2 = st.columns(2)\n major, minor, patch = solver_block.get_parsed_version()\n if major < 9 or (major == 9 and minor < 8):\n col1.metric(\n label=\"CP-SAT Version\",\n value=solver_block.get_version(),\n help=\"CP-SAT has seen significant performance improvements over the last years. Make sure to use the latest version.\",\n delta=\"outdated\",\n delta_color=\"inverse\",\n )\n else:\n col1.metric(\n label=\"CP-SAT Version\",\n value=solver_block.get_version(),\n help=\"CP-SAT has seen significant performance improvements over the last years. Make sure to use the latest version.\",\n )\n col2.metric(\n label=\"Number of workers\",\n value=solver_block.get_number_of_workers(),\n help=\"CP-SAT has different parallelization tiers, triggered by the number of workers. More workers can improve performance. Fine more information [here](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers)\",\n )\n # https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers\n\n # print all parameters (key: value)\n if solver_block.get_parameters():\n md = \"*CP-SAT was setup with the following parameters:*\\n\"\n st.markdown(md)\n st.json(solver_block.get_parameters())\n st.markdown(\n \"*You can find more information about the parameters [here](https://github.com/google/or-tools/blob/stable/ortools/sat/sat_parameters.proto).*\"\n )\n\n col1, col2, col3 = st.columns(3)\n response = response_block.to_dict()\n\n col1.metric(\n label=\"Status\",\n value=response[\"status\"],\n help=\"\"\"\n CP-SAT can have 5 different statuses:\n - `UNKNOWN`: The solver timed out before finding a solution or proving infeasibility.\n - `OPTIMAL`: The solver found an optimal solution. This is the best possible status.\n - `FEASIBLE`: The solver found a feasible solution, but it is not guaranteed to be optimal.\n - `INFEASIBLE`: The solver proved that the problem is infeasible. This often indicates a bug in the model.\n - `MODEL_INVALID`: Definitely a bug. Should rarely happen.\n \"\"\",\n )\n col2.metric(\n label=\"Time\",\n value=f\"{float(response['walltime']):.3f}s\",\n help=\"The total time spent by the solver. This includes the time spent in presolve and the time spent in the search.\",\n )\n col3.metric(\n label=\"Presolve\",\n value=f\"{search_progress_block.get_presolve_time():.3f}s\",\n help=\"The time spent in presolve. This is usually a small fraction of the total time.\",\n )\n\n col1, col2, col3 = st.columns(3)\n col1.metric(\n label=\"Variables\",\n value=initial_model_block.get_num_variables(),\n help=\"CP-SAT can handle (hundreds of) thousands of variables. This just gives a rough estimate of the size of the problem. Check *Initial Optimization Model* for more information. 
Many variables may also be removed during presolve, check *Presolve Summary*.\",\n )\n col2.metric(\n label=\"Constraints\",\n value=initial_model_block.get_num_constraints(),\n help=\"CP-SAT can handle (hundreds of) thousands of constraints. More important than the number is the type of constraints. Some constraints are more expensive than others. Check *Initial Optimization Model* for more information.\",\n )\n col3.metric(\n label=\"Type\",\n value=\"Optimization\"\n if initial_model_block.is_optimization()\n else \"Satisfaction\",\n help=\"Is the model an optimization or satisfaction model?\",\n )\n # col3.metric(\"Model Fingerprint\", value=initial_model_block.get_model_fingerprint())\n\n col1, col2, col3 = st.columns(3)\n try:\n obj = float(response[\"objective\"])\n except ValueError:\n obj = None\n col1.metric(\n label=\"Objective\",\n value=obj,\n help=\"Value of the best solution found.\",\n )\n try:\n bound = float(response[\"best_bound\"])\n except ValueError:\n bound = None\n col2.metric(\n label=\"Best bound\",\n value=bound,\n help=\"Bound on how good the best solution can be. If it matches the objective, the solution is optimal.\",\n )\n gap = response_block.get_gap()\n gap_help = \"The gap is the difference between the objective and the best bound. The smaller the better. A gap of 0% means that the solution is optimal.\"\n if gap is None:\n col3.metric(label=\"Gap\", value=None, help=gap_help)\n else:\n col3.metric(label=\"Gap\", value=f\"{gap:.2f}%\", help=gap_help)\n if response[\"status\"] == \"OPTIMAL\" and gap > 0:\n st.error(\n \"CP-SAT returned the status `OPTIMAL`, but does not have a matching bound. This indicates a bug.\"\n )\n\n if (\n response[\"status\"] in (\"OPTIMAL\", \"FEASIBLE\")\n and initial_model_block.is_optimization()\n ):\n fig = search_progress_block.as_plotly()\n if fig:\n st.plotly_chart(fig, use_container_width=True)\n except KeyError as ke:\n st.error(\n f\"Error parsing information. Log seems to be incomplete: {ke}. Make sure you enter the full log without any modifications. The parser is sensitive to new lines.\"\n )"
}
] | import streamlit as st
from cpsat_log_parser import LogParser
from cpsat_log_parser.blocks import (
SearchProgressBlock,
SearchStatsBlock,
SolutionsBlock,
TableBlock,
SolverBlock,
ResponseBlock,
PresolveLogBlock,
TaskTimingBlock,
PresolvedModelBlock,
)
from _app import print_header, input_log, show_overview | 9,026 | """
This file is the main entry point for the Streamlit app.
Further parts of the app are in the `_app` folder.
The logic for parsing the log is in the `cpsat_log_parser` folder.
"""
print_header()
data = input_log()
if data:
st.header("Log Analysis")
st.warning(
"This is just a prototype and may crash or show wrong results. Please report any issues [here](https://github.com/d-krupke/CP-SAT-Log-Analyzer). I welcome any feedback and complex logs to test this on."
)
parser = LogParser(data)
| """
This file is the main entry point for the Streamlit app.
Further parts of the app are in the `_app` folder.
The logic for parsing the log is in the `cpsat_log_parser` folder.
"""
print_header()
data = input_log()
if data:
st.header("Log Analysis")
st.warning(
"This is just a prototype and may crash or show wrong results. Please report any issues [here](https://github.com/d-krupke/CP-SAT-Log-Analyzer). I welcome any feedback and complex logs to test this on."
)
parser = LogParser(data) | show_overview(parser) | 12 | 2023-12-18 09:18:19+00:00 | 12k |
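Editor's note: app.py above expects a complete CP-SAT log produced with log_search_progress enabled, as its header block explains. The sketch below shows one way to capture such a log with or-tools; the toy model is made up for illustration, and assigning `log_callback` assumes a reasonably recent or-tools release.

from ortools.sat.python import cp_model

model = cp_model.CpModel()
x = model.NewIntVar(0, 10, "x")
y = model.NewIntVar(0, 10, "y")
model.Add(x + y <= 12)
model.Maximize(2 * x + y)

solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True   # required for a parseable log
log_lines = []
solver.log_callback = log_lines.append         # collect the log instead of printing it
solver.Solve(model)

with open("cpsat_log.txt", "w") as f:
    f.write("\n".join(log_lines))              # upload or paste this file into the analyzer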
MMC-K/multimodal_generation_downstream_tasks | training_veldt5_accelerate.py | [
{
"identifier": "DatasetForVLAlign",
"path": "data_utils.py",
"snippet": "class DatasetForVLAlign(Dataset):\n def __init__(\n self,\n file_path: str,\n image_tokenizer: ViTFeatureExtractor,\n text_tokenizer: AutoTokenizer,\n image_root_dir=None,\n text_max_length=512,\n ):\n super().__init__()\n self.file_path = file_path\n self.image_tokenizer = image_tokenizer\n self.text_tokenizer = text_tokenizer\n self.image_root_dir=image_root_dir\n self.text_max_length = text_max_length\n\n logger.info(\"loading dataset...\")\n self.data = json.load(open(file_path, \"r\"))\n logger.info(\"{} examples was loaded.\".format(len(self.data)))\n\n def __getitem__(self, index):\n sample = self.data[index]\n\n path = sample[\"path\"]\n if self.image_root_dir is not None:\n path = os.path.join(self.image_root_dir, path)\n \n description = sample[\"description\"]\n\n image = Image.open(path)\n\n image_feature = self.image_tokenizer(images=image, return_tensors=\"pt\")\n text_feature = self.text_tokenizer(description, return_tensors=\"pt\", truncation=True, max_length=self.text_max_length)\n\n return {\n \"pixel_values\": image_feature[\"pixel_values\"],\n \"input_ids\": text_feature[\"input_ids\"],\n \"attention_mask\": text_feature[\"attention_mask\"],\n }\n\n def __len__(self):\n return len(self.data)\n\n def get_collate_fn(self):\n def collate_fn(samples, pad_id=0):\n if len(samples) == 0:\n return {}\n return {\n \"input_ids\": collate_tokens([s[\"input_ids\"] for s in samples], pad_id),\n \"attention_mask\": collate_tokens([s[\"attention_mask\"] for s in samples], 0),\n \"pixel_values\": default_collate([s[\"pixel_values\"][0] for s in samples])\n }\n return functools.partial(collate_fn, pad_id=self.text_tokenizer.pad_token_id)"
},
{
"identifier": "VELDT5Model",
"path": "modeling_veldt5.py",
"snippet": "class VELDT5Model(PreTrainedModel):\n r\"\"\"\n [`VELDT5Model`] is a generic model class that will be instantiated as a transformer architecture with\n one of the base vision model classes of the library as encoder and another one as decoder when created with the\n :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and\n :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.\n \"\"\"\n config_class = VELDT5Config\n base_model_prefix = \"veldt5\"\n main_input_name = \"pixel_values\"\n supports_gradient_checkpointing = True\n\n def __init__(\n self,\n config: Optional[PretrainedConfig] = None,\n encoder: Optional[PreTrainedModel] = None,\n decoder: Optional[PreTrainedModel] = None,\n ):\n if config is None and (encoder is None or decoder is None):\n raise ValueError(\"Either a configuration or an encoder and a decoder has to be provided.\")\n if config is None:\n config = VELDT5Config.from_encoder_decoder_configs(encoder.config, decoder.config)\n else:\n if not isinstance(config, self.config_class):\n raise ValueError(f\"Config: {config} has to be of type {self.config_class}\")\n\n if config.decoder.cross_attention_hidden_size is not None:\n if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:\n raise ValueError(\n \"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal\"\n f\" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for\"\n f\" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for\"\n \" `config.encoder.hidden_size`.\"\n )\n\n # initialize with config\n # make sure input & output embeddings is not tied\n config.tie_word_embeddings = False\n super().__init__(config)\n\n if encoder is None:\n encoder = ViTModel(config.encoder, add_pooling_layer=False)\n\n if decoder is None:\n decoder = T5DualDecoderDoubleHeadsModel(config.decoder)\n\n self.encoder = encoder\n self.decoder = decoder\n\n if self.encoder.config.to_dict() != self.config.encoder.to_dict():\n logger.warning(\n f\"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:\"\n f\" {self.config.encoder}\"\n )\n if self.decoder.config.to_dict() != self.config.decoder.to_dict():\n logger.warning(\n f\"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:\"\n f\" {self.config.decoder}\"\n )\n\n # make sure that the individual model's config refers to the shared config\n # so that the updates to the config will be synced\n self.encoder.config = self.config.encoder\n self.decoder.config = self.config.decoder\n\n # encoder outputs might need to be projected to different dimension for decoder\n if (\n self.encoder.config.hidden_size != self.decoder.config.hidden_size\n and self.decoder.config.cross_attention_hidden_size is None\n ):\n self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)\n\n if self.encoder.get_output_embeddings() is not None:\n raise ValueError(\n f\"The encoder {self.encoder} should not have a LM Head. 
Please use a model without LM Head\"\n )\n \n\n pooling_config = copy.deepcopy(self.encoder.config)\n pooling_config.summary_type = \"attn\"\n self.global_pooling = SequenceSummary(pooling_config, num_queries=self.config.num_queries_global)\n self.local_pooling = SequenceSummary(pooling_config, num_queries=self.config.num_queries_local)\n\n\n def _set_gradient_checkpointing(self, module, value=False):\n # call both encoder and decoder function on gradient checkpointing\n self.encoder._set_gradient_checkpointing(module, value=value)\n self.decoder._set_gradient_checkpointing(module, value=value)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def get_output_embeddings(self):\n return self.decoder.get_output_embeddings()\n\n def set_output_embeddings(self, new_embeddings):\n return self.decoder.set_output_embeddings(new_embeddings)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n # At the moment fast initialization is not supported for composite models\n if kwargs.get(\"_fast_init\", False):\n logger.warning(\n \"Fast initialization is currently not supported for VELDT5Model. \"\n \"Falling back to slow initialization...\"\n )\n kwargs[\"_fast_init\"] = False\n return super().from_pretrained(*args, **kwargs)\n\n @classmethod\n def from_encoder_decoder_pretrained(\n cls,\n encoder_pretrained_model_name_or_path: str = None,\n decoder_pretrained_model_name_or_path: str = None,\n *model_args,\n **kwargs\n ) -> PreTrainedModel:\n r\"\"\"\n Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model\n checkpoints.\n\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train\n the model, you need to first set it back in training mode with `model.train()`.\n\n Params:\n encoder_pretrained_model_name_or_path (`str`, *optional*):\n Information necessary to initiate the image encoder. Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An\n example is `google/vit-base-patch16-224-in21k`.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):\n Information necessary to initiate the text decoder. Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a\n user or organization name, like `dbmdz/bert-base-german-cased`.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. 
This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args (remaining positional arguments, *optional*):\n All remaning positional arguments will be passed to the underlying model's `__init__` method.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`).\n\n - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.\n - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.\n - To update the parent model configuration, do not use a prefix for each configuration parameter.\n\n Behaves differently depending on whether a `config` is provided or automatically loaded.\n\n Example:\n\n ```python\n >>> from transformers import VELDT5Model\n\n >>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized\n >>> model = VELDT5Model.from_encoder_decoder_pretrained(\n ... \"google/vit-base-patch16-224-in21k\", \"bert-base-uncased\"\n ... )\n >>> # saving model after fine-tuning\n >>> model.save_pretrained(\"./vit-bert\")\n >>> # load fine-tuned model\n >>> model = VELDT5Model.from_pretrained(\"./vit-bert\")\n ```\"\"\"\n\n kwargs_encoder = {\n argument[len(\"encoder_\") :]: value for argument, value in kwargs.items() if argument.startswith(\"encoder_\")\n }\n\n kwargs_decoder = {\n argument[len(\"decoder_\") :]: value for argument, value in kwargs.items() if argument.startswith(\"decoder_\")\n }\n\n # remove encoder, decoder kwargs from kwargs\n for key in kwargs_encoder.keys():\n del kwargs[\"encoder_\" + key]\n for key in kwargs_decoder.keys():\n del kwargs[\"decoder_\" + key]\n\n # Load and initialize the encoder and decoder\n # The distinction between encoder and decoder at the model level is made\n # by the value of the flag `is_decoder` that we need to set correctly.\n encoder = kwargs_encoder.pop(\"model\", None)\n if encoder is None:\n if encoder_pretrained_model_name_or_path is None:\n raise ValueError(\n \"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has \"\n \"to be defined.\"\n )\n\n if \"config\" not in kwargs_encoder:\n encoder_config, kwargs_encoder = ViTConfig.from_pretrained(\n encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True\n )\n\n if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:\n logger.info(\n f\"Initializing {encoder_pretrained_model_name_or_path} as a encoder model \"\n \"from a decoder model. 
Cross-attention and casual mask are disabled.\"\n )\n encoder_config.is_decoder = False\n encoder_config.add_cross_attention = False\n\n kwargs_encoder[\"config\"] = encoder_config\n\n encoder = ViTModel.from_pretrained(encoder_pretrained_model_name_or_path, add_pooling_layer=False, *model_args, **kwargs_encoder)\n\n decoder = kwargs_decoder.pop(\"model\", None)\n if decoder is None:\n if decoder_pretrained_model_name_or_path is None:\n raise ValueError(\n \"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has \"\n \"to be defined.\"\n )\n\n if \"config\" not in kwargs_decoder:\n decoder_config, kwargs_decoder = T5Config.from_pretrained(\n decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True\n )\n\n if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:\n logger.info(\n f\"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention\"\n f\" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if\"\n f\" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.\"\n )\n decoder_config.is_decoder = True\n decoder_config.add_cross_attention = True\n\n kwargs_decoder[\"config\"] = decoder_config\n\n if kwargs_decoder[\"config\"].is_decoder is False or kwargs_decoder[\"config\"].add_cross_attention is False:\n logger.warning(\n f\"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. \"\n f\"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, \"\n \"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` \"\n \"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a \"\n \"`decoder_config` to `.from_encoder_decoder_pretrained(...)`\"\n )\n\n decoder = T5DualDecoderDoubleHeadsModel.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)\n\n # instantiate config with corresponding kwargs\n config = VELDT5Config.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)\n\n # make sure input & output embeddings is not tied\n config.tie_word_embeddings = False\n return cls(encoder=encoder, decoder=decoder, config=config)\n\n @add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC_VELDT5)\n def forward(\n self,\n pixel_values=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n decoder_inputs_embeds=None,\n labels=None,\n return_contrastive_loss=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n logit_temperature=1.0,\n label_smoothing=0.0,\n **kwargs,\n ):\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import TrOCRProcessor, VisionEncoderDecoderModel\n >>> import requests\n >>> from PIL import Image\n >>> import torch\n\n >>> processor = TrOCRProcessor.from_pretrained(\"microsoft/trocr-base-handwritten\")\n >>> model = VisionEncoderDecoderModel.from_pretrained(\"microsoft/trocr-base-handwritten\")\n\n >>> # load image from the IAM dataset\n >>> url = \"https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw).convert(\"RGB\")\n\n >>> # training\n >>> model.config.decoder_start_token_id = processor.tokenizer.cls_token_id\n >>> model.config.pad_token_id = 
processor.tokenizer.pad_token_id\n >>> model.config.vocab_size = model.config.decoder.vocab_size\n\n >>> pixel_values = processor(image, return_tensors=\"pt\").pixel_values\n >>> text = \"hello world\"\n >>> labels = processor.tokenizer(text, return_tensors=\"pt\").input_ids\n >>> outputs = model(pixel_values=pixel_values, labels=labels)\n >>> loss = outputs.loss\n\n >>> # inference (generation)\n >>> generated_ids = model.generate(pixel_values)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith(\"decoder_\")}\n\n kwargs_decoder = {\n argument[len(\"decoder_\") :]: value for argument, value in kwargs.items() if argument.startswith(\"decoder_\")\n }\n\n if encoder_outputs is None and pixel_values is not None:\n # if pixel_values is None:\n # raise ValueError(\"You have to specify pixel_values\")\n\n encoder_outputs = self.encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs_encoder,\n )\n elif isinstance(encoder_outputs, tuple):\n encoder_outputs = BaseModelOutput(*encoder_outputs)\n\n encoder_hidden_states = None if encoder_outputs is None else encoder_outputs[0]\n pooler_output_local = None if encoder_outputs is None else self.local_pooling(encoder_hidden_states)\n pooler_output_global = None if encoder_outputs is None else self.global_pooling(pooler_output_local).squeeze(1)\n\n # optionally project encoder_hidden_states\n if (\n self.encoder.config.hidden_size != self.decoder.config.hidden_size\n and self.decoder.config.cross_attention_hidden_size is None\n and pooler_output_local is not None\n ):\n pooler_output_local = self.enc_to_dec_proj(pooler_output_local)\n\n\n # else:\n encoder_attention_mask = None\n\n if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):\n decoder_input_ids = self.decoder.prepare_decoder_input_ids_from_labels(labels)\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=pooler_output_local,\n encoder_attention_mask=encoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n use_cache=use_cache,\n past_key_values=past_key_values,\n return_dict=return_dict,\n **kwargs_decoder,\n )\n\n # Compute loss independent from decoder (as some shift the logits inside them)\n loss = None\n if labels is not None:\n logits = decoder_outputs.logits if return_dict else decoder_outputs[0]\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))\n \n c_loss = None\n if return_contrastive_loss is not None and encoder_outputs is not None:\n decoder_logits = decoder_outputs.ss_logits if return_dict else decoder_outputs[0]\n encoder_logits = pooler_output_global\n loss_fct = CrossEntropyLoss(label_smoothing=label_smoothing)\n\n if (\n self.encoder.config.hidden_size != self.decoder.config.hidden_size\n and self.decoder.config.cross_attention_hidden_size is None\n ):\n encoder_logits = self.enc_to_dec_proj(encoder_logits)\n\n\n encoder_logits = nn.functional.normalize(encoder_logits)\n decoder_logits = nn.functional.normalize(decoder_logits)\n\n batch_size = encoder_logits.size(0)\n 
scores = torch.mm(decoder_logits, encoder_logits.t())\n target = torch.arange(batch_size).to(decoder_logits.device)\n\n c_loss = loss_fct(scores/logit_temperature, target) + loss_fct(scores.t()/logit_temperature, target)\n\n\n if decoder_outputs.self_decoder_hidden_states is not None and decoder_outputs.cross_decoder_hidden_states is not None:\n decoder_hidden_states = decoder_outputs.self_decoder_hidden_states + decoder_outputs.cross_decoder_hidden_states\n else:\n decoder_hidden_states = None\n\n if decoder_outputs.self_decoder_attentions is not None and decoder_outputs.cross_decoder_attentions is not None:\n decoder_attentions = decoder_outputs.self_decoder_attentions + decoder_outputs.cross_decoder_attentions\n else:\n decoder_attentions = None\n\n if not return_dict:\n outputs = (\n decoder_outputs.logits,\n pooler_output_global,\n pooler_output_local,\n decoder_outputs.ss_logits,\n decoder_outputs.past_key_values,\n decoder_hidden_states,\n decoder_attentions,\n decoder_outputs.cross_attentions,\n None if encoder_outputs is None else encoder_outputs.last_hidden_state,\n None if encoder_outputs is None else encoder_outputs.hidden_states,\n None if encoder_outputs is None else encoder_outputs.attentions,\n )\n if c_loss is not None:\n outputs = (c_loss,) + outputs\n if loss is not None:\n return (loss,) + outputs\n else:\n return outputs\n\n return VELDDoubleHeadsOutput(\n loss=loss,\n c_loss=c_loss,\n logits=decoder_outputs.logits,\n e_logits_g=pooler_output_global,\n e_logits_l=pooler_output_local,\n d_logits=decoder_outputs.ss_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_hidden_states,\n decoder_attentions=decoder_attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=None if encoder_outputs is None else encoder_outputs.last_hidden_state,\n encoder_hidden_states=None if encoder_outputs is None else encoder_outputs.hidden_states,\n encoder_attentions=None if encoder_outputs is None else encoder_outputs.attentions,\n )\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return self.decoder.prepare_decoder_input_ids_from_labels(labels)\n\n def prepare_inputs_for_generation(\n self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs\n ):\n decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)\n decoder_attention_mask = decoder_inputs[\"attention_mask\"] if \"attention_mask\" in decoder_inputs else None\n input_dict = {\n \"attention_mask\": attention_mask,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"decoder_input_ids\": decoder_inputs[\"input_ids\"],\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": decoder_inputs[\"past_key_values\"],\n \"use_cache\": use_cache,\n }\n return input_dict\n\n def resize_token_embeddings(self, *args, **kwargs):\n raise NotImplementedError(\n \"Resizing the embedding layers via the VisionEncoderDecoderModel directly is not supported.Please use the\"\n \" respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))\"\n )\n\n def _reorder_cache(self, past, beam_idx):\n # apply decoder cache reordering here\n return self.decoder._reorder_cache(past, beam_idx)"
}
] | import argparse
import json
import logging
import math
import os
import random
import numpy as np
import torch
import transformers
import datasets
from curses import raw
from datetime import timedelta
from itertools import chain
from torch import nn
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from torch.nn import CrossEntropyLoss
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed, InitProcessGroupKwargs, DistributedDataParallelKwargs
from torch.optim import AdamW
from transformers import (
AutoTokenizer,
ViTFeatureExtractor,
SchedulerType,
get_scheduler,
default_data_collator,
)
from datasets import load_dataset
from data_utils import DatasetForVLAlign
from modeling_veldt5 import VELDT5Model | 7,931 | default=None,
help="Total number of validation steps to perform.",
)
parser.add_argument(
"--max_train_steps_per_epoch",
type=int,
default=None,
help="The number of training steps to perform on a epoch. (for debugging)",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--warmup_portion", type=float, default=0, help="Portion of total training steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
# logging
parser.add_argument(
"--logging_steps", type=int, default=0, help="Number of steps for logging (stdout)."
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--finetune",
action="store_true",
help="disable language dataset training in finetuning.",
)
parser.add_argument(
"--from_veld_pretrained",
type=str,
default=None,
help="pretrained veld model to use for finetuning.",
)
args = parser.parse_args()
return args
def main():
args = parse_args()
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
# accelerator_log_kwargs["logging_dir"] = args.output_dir
accelerator_log_kwargs["project_dir"] = args.output_dir
kwargs_handlers = [
InitProcessGroupKwargs(timeout=timedelta(days=10)),
DistributedDataParallelKwargs(find_unused_parameters=True)
]
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
        kwargs_handlers=kwargs_handlers, **accelerator_log_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
random.seed(args.seed)
if accelerator.is_main_process and args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load model and tokenizer
if args.from_veld_pretrained is None:
| #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022 san kim
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = get_logger(__name__)
# epochs=1
# learning_rate=0.001
# scheduler_type=linear
# accelerate launch training_veldt5_accelerate.py \
# --vision_model 'google/vit-base-patch16-384' \
# --language_model 'KETI-AIR/ke-t5-base' \
# --gradient_accumulation_steps 32 \
# --per_device_train_batch_size 16 \
# --per_device_eval_batch_size 16 \
# --warmup_portion 0.02 \
# --logging_steps 20 \
# --checkpointing_steps 10000 \
# --num_train_epochs $epochs \
# --lr_scheduler_type $scheduler_type \
# --with_tracking \
# --output_dir veld_e${epochs}_${scheduler_type}
# accelerate launch training_veldt5_accelerate.py \
# --max_train_steps_per_epoch 100 \
# --max_validation_steps 20 \
# --logging_steps 5 \
# --with_tracking \
# --output_dir test
def parse_args():
    parser = argparse.ArgumentParser(description="Train a VELD vision-language model")
    # data
parser.add_argument(
"--dataset_name_lm",
type=str,
default="sent_dataset.py",
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name_lm",
type=str,
default="base",
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--hf_cache_dir",
type=str,
default="../huggingface_datasets",
help="The path to cache directory for huggingface datasets.",
)
parser.add_argument(
"--hf_data_dir_lm",
type=str,
default="../sent_eq_4k_25/*/",
help="The path to data directory for huggingface datasets.",
)
parser.add_argument(
"--validation_split_percentage",
default=1,
help="The percentage of the train set used as validation set in case there's no validation split",
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=256,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
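    # Note: argparse's type=bool turns any non-empty string (including "False") into True,
    # so "--overwrite_cache False" still enables the flag; action="store_true" would be the stricter pattern.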
parser.add_argument(
"--block_size",
type=int,
default=None,
help=(
"Optional input sequence length after tokenization. The training dataset will be truncated in block of"
" this size for training. Default to the model max input length for single sentence inputs (take into"
" account special tokens)."
),
)
parser.add_argument("--train_path",
default="../../downloaded_data/train-filtered.json", type=str)
parser.add_argument("--validation_path",
default="../../downloaded_data/validation-filtered.json", type=str)
parser.add_argument("--image_root_dir",
default="../../downloaded_data", type=str)
parser.add_argument(
"--dataset_name",
type=str,
default="image_text_pair_datasets.py",
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default="base",
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--hf_data_dir",
type=str,
default="../../downloaded_data",
help="The path to data directory for huggingface datasets.",
)
# model
parser.add_argument("--vision_model",
default="google/vit-base-patch16-384", type=str)
parser.add_argument("--language_model",
default="KETI-AIR/ke-t5-base", type=str)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
# training
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=16,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=8e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--contrastive_weight", default=1.0,
type=float, help="The weighting value for contrastive loss")
parser.add_argument("--captioning_weight", default=2.0,
type=float, help="The weighting value for captioning loss")
parser.add_argument("--lm_weight", default=1.0,
type=float, help="The weighting value for lm loss")
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--logit_temperature", default=1.0,
type=float, help="temperature for logits")
parser.add_argument("--label_smoothing", default=0.0,
type=float, help="label smoothing for cross entropy")
parser.add_argument("--num_train_epochs", type=int, default=1, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--max_validation_steps",
type=int,
default=None,
help="Total number of validation steps to perform.",
)
parser.add_argument(
"--max_train_steps_per_epoch",
type=int,
default=None,
help="The number of training steps to perform on a epoch. (for debugging)",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--warmup_portion", type=float, default=0, help="Portion of total training steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
# logging
parser.add_argument(
"--logging_steps", type=int, default=0, help="Number of steps for logging (stdout)."
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--finetune",
action="store_true",
help="disable language dataset training in finetuning.",
)
parser.add_argument(
"--from_veld_pretrained",
type=str,
default=None,
help="pretrained veld model to use for finetuning.",
)
args = parser.parse_args()
return args
def main():
args = parse_args()
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
# accelerator_log_kwargs["logging_dir"] = args.output_dir
accelerator_log_kwargs["project_dir"] = args.output_dir
kwargs_handlers = [
InitProcessGroupKwargs(timeout=timedelta(days=10)),
DistributedDataParallelKwargs(find_unused_parameters=True)
]
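    # find_unused_parameters=True lets DDP tolerate parameters that receive no gradient in a step
    # (e.g. when the language-modeling branch is skipped), at the cost of an extra traversal of the autograd graph.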
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
        kwargs_handlers=kwargs_handlers, **accelerator_log_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
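        # accelerate's set_seed already seeds python's random module, numpy and torch;
        # the explicit random.seed call below is redundant but harmless.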
random.seed(args.seed)
if accelerator.is_main_process and args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load model and tokenizer
if args.from_veld_pretrained is None: | model = VELDT5Model.from_encoder_decoder_pretrained( | 1 | 2023-12-19 01:37:23+00:00 | 12k |
sidharthrajaram/StyleTTS2 | src/styletts2/models.py | [
{
"identifier": "ASRCNN",
"path": "src/styletts2/Utils/ASR/models.py",
"snippet": "class ASRCNN(nn.Module):\n def __init__(self,\n input_dim=80,\n hidden_dim=256,\n n_token=35,\n n_layers=6,\n token_embedding_dim=256,\n\n ):\n super().__init__()\n self.n_token = n_token\n self.n_down = 1\n self.to_mfcc = MFCC()\n self.init_cnn = ConvNorm(input_dim//2, hidden_dim, kernel_size=7, padding=3, stride=2)\n self.cnns = nn.Sequential(\n *[nn.Sequential(\n ConvBlock(hidden_dim),\n nn.GroupNorm(num_groups=1, num_channels=hidden_dim)\n ) for n in range(n_layers)])\n self.projection = ConvNorm(hidden_dim, hidden_dim // 2)\n self.ctc_linear = nn.Sequential(\n LinearNorm(hidden_dim//2, hidden_dim),\n nn.ReLU(),\n LinearNorm(hidden_dim, n_token))\n self.asr_s2s = ASRS2S(\n embedding_dim=token_embedding_dim,\n hidden_dim=hidden_dim//2,\n n_token=n_token)\n\n def forward(self, x, src_key_padding_mask=None, text_input=None):\n x = self.to_mfcc(x)\n x = self.init_cnn(x)\n x = self.cnns(x)\n x = self.projection(x)\n x = x.transpose(1, 2)\n ctc_logit = self.ctc_linear(x)\n if text_input is not None:\n _, s2s_logit, s2s_attn = self.asr_s2s(x, src_key_padding_mask, text_input)\n return ctc_logit, s2s_logit, s2s_attn\n else:\n return ctc_logit\n\n def get_feature(self, x):\n x = self.to_mfcc(x.squeeze(1))\n x = self.init_cnn(x)\n x = self.cnns(x)\n x = self.projection(x)\n return x\n\n def length_to_mask(self, lengths):\n mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)\n mask = torch.gt(mask+1, lengths.unsqueeze(1)).to(lengths.device)\n return mask\n\n def get_future_mask(self, out_length, unmask_future_steps=0):\n \"\"\"\n Args:\n out_length (int): returned mask shape is (out_length, out_length).\n unmask_futre_steps (int): unmasking future step size.\n Return:\n mask (torch.BoolTensor): mask future timesteps mask[i, j] = True if i > j + unmask_future_steps else False\n \"\"\"\n index_tensor = torch.arange(out_length).unsqueeze(0).expand(out_length, -1)\n mask = torch.gt(index_tensor, index_tensor.T + unmask_future_steps)\n return mask"
},
{
"identifier": "JDCNet",
"path": "src/styletts2/Utils/JDC/model.py",
"snippet": "class JDCNet(nn.Module):\n \"\"\"\n Joint Detection and Classification Network model for singing voice melody.\n \"\"\"\n def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):\n super().__init__()\n self.num_class = num_class\n\n # input = (b, 1, 31, 513), b = batch size\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)\n nn.BatchNorm2d(num_features=64),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)\n )\n\n # res blocks\n self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)\n self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)\n self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)\n\n # pool block\n self.pool_block = nn.Sequential(\n nn.BatchNorm2d(num_features=256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)\n nn.Dropout(p=0.2),\n )\n\n # maxpool layers (for auxiliary network inputs)\n # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)\n self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))\n # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)\n self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))\n # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)\n self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))\n\n # in = (b, 640, 31, 2), out = (b, 256, 31, 2)\n self.detector_conv = nn.Sequential(\n nn.Conv2d(640, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Dropout(p=0.2),\n )\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_classifier = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_detector = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b * 31, 512)\n self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)\n\n # input: (b * 31, 512)\n self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier\n\n # initialize weights\n self.apply(self.init_weights)\n\n def get_feature_GAN(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return poolblock_out.transpose(-1, -2)\n \n def get_feature(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return self.pool_block[2](poolblock_out)\n \n def forward(self, x):\n \"\"\"\n Returns:\n classification_prediction, detection_prediction\n sizes: (b, 31, 722), (b, 31, 2)\n \"\"\"\n ###############################\n # forward pass for classifier #\n ###############################\n seq_len = x.shape[-1]\n x = x.float().transpose(-1, -2)\n \n convblock_out = 
self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n \n \n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n GAN_feature = poolblock_out.transpose(-1, -2)\n poolblock_out = self.pool_block[2](poolblock_out)\n \n # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)\n classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))\n classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states\n\n classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)\n classifier_out = self.classifier(classifier_out)\n classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class)\n \n # sizes: (b, 31, 722), (b, 31, 2)\n # classifier output consists of predicted pitch classes per frame\n # detector output consists of: (isvoice, notvoice) estimates per frame\n return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out\n\n @staticmethod\n def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):\n for p in m.parameters():\n if p.data is None:\n continue\n\n if len(p.shape) >= 2:\n nn.init.orthogonal_(p.data)\n else:\n nn.init.normal_(p.data)"
},
{
"identifier": "KDiffusion",
"path": "src/styletts2/Modules/diffusion/sampler.py",
"snippet": "class KDiffusion(Diffusion):\n \"\"\"Elucidated Diffusion (Karras et al. 2022): https://arxiv.org/abs/2206.00364\"\"\"\n\n alias = \"k\"\n\n def __init__(\n self,\n net: nn.Module,\n *,\n sigma_distribution: Distribution,\n sigma_data: float, # data distribution standard deviation\n dynamic_threshold: float = 0.0,\n ):\n super().__init__()\n self.net = net\n self.sigma_data = sigma_data\n self.sigma_distribution = sigma_distribution\n self.dynamic_threshold = dynamic_threshold\n\n def get_scale_weights(self, sigmas: Tensor) -> Tuple[Tensor, ...]:\n sigma_data = self.sigma_data\n c_noise = torch.log(sigmas) * 0.25\n sigmas = rearrange(sigmas, \"b -> b 1 1\")\n c_skip = (sigma_data ** 2) / (sigmas ** 2 + sigma_data ** 2)\n c_out = sigmas * sigma_data * (sigma_data ** 2 + sigmas ** 2) ** -0.5\n c_in = (sigmas ** 2 + sigma_data ** 2) ** -0.5\n return c_skip, c_out, c_in, c_noise\n\n def denoise_fn(\n self,\n x_noisy: Tensor,\n sigmas: Optional[Tensor] = None,\n sigma: Optional[float] = None,\n **kwargs,\n ) -> Tensor:\n batch_size, device = x_noisy.shape[0], x_noisy.device\n sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device)\n\n # Predict network output and add skip connection\n c_skip, c_out, c_in, c_noise = self.get_scale_weights(sigmas)\n x_pred = self.net(c_in * x_noisy, c_noise, **kwargs)\n x_denoised = c_skip * x_noisy + c_out * x_pred\n\n return x_denoised\n\n def loss_weight(self, sigmas: Tensor) -> Tensor:\n # Computes weight depending on data distribution\n return (sigmas ** 2 + self.sigma_data ** 2) * (sigmas * self.sigma_data) ** -2\n\n def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor:\n batch_size, device = x.shape[0], x.device\n from einops import rearrange, reduce\n\n # Sample amount of noise to add for each batch element\n sigmas = self.sigma_distribution(num_samples=batch_size, device=device)\n sigmas_padded = rearrange(sigmas, \"b -> b 1 1\")\n\n # Add noise to input\n noise = default(noise, lambda: torch.randn_like(x))\n x_noisy = x + sigmas_padded * noise\n \n # Compute denoised values\n x_denoised = self.denoise_fn(x_noisy, sigmas=sigmas, **kwargs)\n\n # Compute weighted loss\n losses = F.mse_loss(x_denoised, x, reduction=\"none\")\n losses = reduce(losses, \"b ... -> b\", \"mean\")\n losses = losses * self.loss_weight(sigmas)\n loss = losses.mean()\n return loss"
},
{
"identifier": "LogNormalDistribution",
"path": "src/styletts2/Modules/diffusion/sampler.py",
"snippet": "class LogNormalDistribution(Distribution):\n def __init__(self, mean: float, std: float):\n self.mean = mean\n self.std = std\n\n def __call__(\n self, num_samples: int, device: torch.device = torch.device(\"cpu\")\n ) -> Tensor:\n normal = self.mean + self.std * torch.randn((num_samples,), device=device)\n return normal.exp()"
},
{
"identifier": "Transformer1d",
"path": "src/styletts2/Modules/diffusion/modules.py",
"snippet": "class Transformer1d(nn.Module):\n def __init__(\n self,\n num_layers: int,\n channels: int,\n num_heads: int,\n head_features: int,\n multiplier: int,\n use_context_time: bool = True,\n use_rel_pos: bool = False,\n context_features_multiplier: int = 1,\n rel_pos_num_buckets: Optional[int] = None,\n rel_pos_max_distance: Optional[int] = None,\n context_features: Optional[int] = None,\n context_embedding_features: Optional[int] = None,\n embedding_max_length: int = 512,\n ):\n super().__init__()\n\n self.blocks = nn.ModuleList(\n [\n TransformerBlock(\n features=channels + context_embedding_features,\n head_features=head_features,\n num_heads=num_heads,\n multiplier=multiplier,\n use_rel_pos=use_rel_pos,\n rel_pos_num_buckets=rel_pos_num_buckets,\n rel_pos_max_distance=rel_pos_max_distance,\n )\n for i in range(num_layers)\n ]\n )\n\n self.to_out = nn.Sequential(\n Rearrange(\"b t c -> b c t\"),\n nn.Conv1d(\n in_channels=channels + context_embedding_features,\n out_channels=channels,\n kernel_size=1,\n ),\n )\n \n use_context_features = exists(context_features)\n self.use_context_features = use_context_features\n self.use_context_time = use_context_time\n\n if use_context_time or use_context_features:\n context_mapping_features = channels + context_embedding_features\n\n self.to_mapping = nn.Sequential(\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n )\n \n if use_context_time:\n assert exists(context_mapping_features)\n self.to_time = nn.Sequential(\n TimePositionalEmbedding(\n dim=channels, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n\n if use_context_features:\n assert exists(context_features) and exists(context_mapping_features)\n self.to_features = nn.Sequential(\n nn.Linear(\n in_features=context_features, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n \n self.fixed_embedding = FixedEmbedding(\n max_length=embedding_max_length, features=context_embedding_features\n )\n \n\n def get_mapping(\n self, time: Optional[Tensor] = None, features: Optional[Tensor] = None\n ) -> Optional[Tensor]:\n \"\"\"Combines context time features and features into mapping\"\"\"\n items, mapping = [], None\n # Compute time features\n if self.use_context_time:\n assert_message = \"use_context_time=True but no time features provided\"\n assert exists(time), assert_message\n items += [self.to_time(time)]\n # Compute features\n if self.use_context_features:\n assert_message = \"context_features exists but no features provided\"\n assert exists(features), assert_message\n items += [self.to_features(features)]\n\n # Compute joint mapping\n if self.use_context_time or self.use_context_features:\n mapping = reduce(torch.stack(items), \"n b m -> b m\", \"sum\")\n mapping = self.to_mapping(mapping)\n\n return mapping\n \n def run(self, x, time, embedding, features):\n \n mapping = self.get_mapping(time, features)\n x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1)\n mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1)\n \n for block in self.blocks:\n x = x + mapping\n x = block(x)\n \n x = x.mean(axis=1).unsqueeze(1)\n x = self.to_out(x)\n x = x.transpose(-1, -2)\n \n return x\n \n def forward(self, x: Tensor, \n time: Tensor, \n embedding_mask_proba: float = 0.0,\n embedding: Optional[Tensor] = None, \n features: Optional[Tensor] = None,\n embedding_scale: float = 1.0) -> Tensor:\n \n b, device = embedding.shape[0], 
embedding.device\n fixed_embedding = self.fixed_embedding(embedding)\n if embedding_mask_proba > 0.0:\n # Randomly mask embedding\n batch_mask = rand_bool(\n shape=(b, 1, 1), proba=embedding_mask_proba, device=device\n )\n embedding = torch.where(batch_mask, fixed_embedding, embedding)\n\n if embedding_scale != 1.0:\n # Compute both normal and fixed embedding outputs\n out = self.run(x, time, embedding=embedding, features=features)\n out_masked = self.run(x, time, embedding=fixed_embedding, features=features)\n # Scale conditional output using classifier-free guidance\n return out_masked + (out - out_masked) * embedding_scale\n else:\n return self.run(x, time, embedding=embedding, features=features)\n \n return x"
},
{
"identifier": "StyleTransformer1d",
"path": "src/styletts2/Modules/diffusion/modules.py",
"snippet": "class StyleTransformer1d(nn.Module):\n def __init__(\n self,\n num_layers: int,\n channels: int,\n num_heads: int,\n head_features: int,\n multiplier: int,\n use_context_time: bool = True,\n use_rel_pos: bool = False,\n context_features_multiplier: int = 1,\n rel_pos_num_buckets: Optional[int] = None,\n rel_pos_max_distance: Optional[int] = None,\n context_features: Optional[int] = None,\n context_embedding_features: Optional[int] = None,\n embedding_max_length: int = 512,\n ):\n super().__init__()\n\n self.blocks = nn.ModuleList(\n [\n StyleTransformerBlock(\n features=channels + context_embedding_features,\n head_features=head_features,\n num_heads=num_heads,\n multiplier=multiplier,\n style_dim=context_features,\n use_rel_pos=use_rel_pos,\n rel_pos_num_buckets=rel_pos_num_buckets,\n rel_pos_max_distance=rel_pos_max_distance,\n )\n for i in range(num_layers)\n ]\n )\n\n self.to_out = nn.Sequential(\n Rearrange(\"b t c -> b c t\"),\n nn.Conv1d(\n in_channels=channels + context_embedding_features,\n out_channels=channels,\n kernel_size=1,\n ),\n )\n \n use_context_features = exists(context_features)\n self.use_context_features = use_context_features\n self.use_context_time = use_context_time\n\n if use_context_time or use_context_features:\n context_mapping_features = channels + context_embedding_features\n\n self.to_mapping = nn.Sequential(\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n nn.Linear(context_mapping_features, context_mapping_features),\n nn.GELU(),\n )\n \n if use_context_time:\n assert exists(context_mapping_features)\n self.to_time = nn.Sequential(\n TimePositionalEmbedding(\n dim=channels, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n\n if use_context_features:\n assert exists(context_features) and exists(context_mapping_features)\n self.to_features = nn.Sequential(\n nn.Linear(\n in_features=context_features, out_features=context_mapping_features\n ),\n nn.GELU(),\n )\n \n self.fixed_embedding = FixedEmbedding(\n max_length=embedding_max_length, features=context_embedding_features\n )\n \n\n def get_mapping(\n self, time: Optional[Tensor] = None, features: Optional[Tensor] = None\n ) -> Optional[Tensor]:\n \"\"\"Combines context time features and features into mapping\"\"\"\n items, mapping = [], None\n # Compute time features\n if self.use_context_time:\n assert_message = \"use_context_time=True but no time features provided\"\n assert exists(time), assert_message\n items += [self.to_time(time)]\n # Compute features\n if self.use_context_features:\n assert_message = \"context_features exists but no features provided\"\n assert exists(features), assert_message\n items += [self.to_features(features)]\n\n # Compute joint mapping\n if self.use_context_time or self.use_context_features:\n mapping = reduce(torch.stack(items), \"n b m -> b m\", \"sum\")\n mapping = self.to_mapping(mapping)\n\n return mapping\n \n def run(self, x, time, embedding, features):\n \n mapping = self.get_mapping(time, features)\n x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1)\n mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1)\n \n for block in self.blocks:\n x = x + mapping\n x = block(x, features)\n \n x = x.mean(axis=1).unsqueeze(1)\n x = self.to_out(x)\n x = x.transpose(-1, -2)\n \n return x\n \n def forward(self, x: Tensor, \n time: Tensor, \n embedding_mask_proba: float = 0.0,\n embedding: Optional[Tensor] = None, \n features: Optional[Tensor] = None,\n embedding_scale: float = 1.0) -> Tensor:\n 
\n b, device = embedding.shape[0], embedding.device\n fixed_embedding = self.fixed_embedding(embedding)\n if embedding_mask_proba > 0.0:\n # Randomly mask embedding\n batch_mask = rand_bool(\n shape=(b, 1, 1), proba=embedding_mask_proba, device=device\n )\n embedding = torch.where(batch_mask, fixed_embedding, embedding)\n\n if embedding_scale != 1.0:\n # Compute both normal and fixed embedding outputs\n out = self.run(x, time, embedding=embedding, features=features)\n out_masked = self.run(x, time, embedding=fixed_embedding, features=features)\n # Scale conditional output using classifier-free guidance\n return out_masked + (out - out_masked) * embedding_scale\n else:\n return self.run(x, time, embedding=embedding, features=features)\n \n return x"
},
{
"identifier": "AudioDiffusionConditional",
"path": "src/styletts2/Modules/diffusion/diffusion.py",
"snippet": "class AudioDiffusionConditional(Model1d):\n def __init__(\n self,\n embedding_features: int,\n embedding_max_length: int,\n embedding_mask_proba: float = 0.1,\n **kwargs,\n ):\n self.embedding_mask_proba = embedding_mask_proba\n default_kwargs = dict(\n **get_default_model_kwargs(),\n unet_type=\"cfg\",\n context_embedding_features=embedding_features,\n context_embedding_max_length=embedding_max_length,\n )\n super().__init__(**{**default_kwargs, **kwargs})\n\n def forward(self, *args, **kwargs):\n default_kwargs = dict(embedding_mask_proba=self.embedding_mask_proba)\n return super().forward(*args, **{**default_kwargs, **kwargs})\n\n def sample(self, *args, **kwargs):\n default_kwargs = dict(\n **get_default_sampling_kwargs(),\n embedding_scale=5.0,\n )\n return super().sample(*args, **{**default_kwargs, **kwargs})"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "src/styletts2/Modules/discriminators.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiPeriodDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n DiscriminatorP(2),\n DiscriminatorP(3),\n DiscriminatorP(5),\n DiscriminatorP(7),\n DiscriminatorP(11),\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "MultiResSpecDiscriminator",
"path": "src/styletts2/Modules/discriminators.py",
"snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "WavLMDiscriminator",
"path": "src/styletts2/Modules/discriminators.py",
"snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(self, slm_hidden=768, \n slm_layers=13, \n initial_channel=64, \n use_spectral_norm=False):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0))\n \n self.convs = nn.ModuleList([\n norm_f(nn.Conv1d(initial_channel, initial_channel * 2, kernel_size=5, padding=2)),\n norm_f(nn.Conv1d(initial_channel * 2, initial_channel * 4, kernel_size=5, padding=2)),\n norm_f(nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)),\n ])\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n \n def forward(self, x):\n x = self.pre(x)\n \n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x"
}
] | import os
import os.path as osp
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from .Utils.ASR.models import ASRCNN
from .Utils.JDC.model import JDCNet
from .Modules.diffusion.sampler import KDiffusion, LogNormalDistribution
from .Modules.diffusion.modules import Transformer1d, StyleTransformer1d
from .Modules.diffusion.diffusion import AudioDiffusionConditional
from .Modules.discriminators import MultiPeriodDiscriminator, MultiResSpecDiscriminator, WavLMDiscriminator
from munch import Munch
from .Modules.istftnet import Decoder
from .Modules.hifigan import Decoder | 10,300 |
x_pad[:, :, :x.shape[-1]] = x
x = x_pad.to(x.device)
return x.transpose(-1, -2)
def inference(self, x, style):
x = self.embedding(x.transpose(-1, -2)) * math.sqrt(self.d_model)
style = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, style], axis=-1)
src = self.pos_encoder(x)
output = self.transformer_encoder(src).transpose(0, 1)
return output
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
def load_F0_models(path):
# load F0 model
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(path, map_location='cpu')['net']
F0_model.load_state_dict(params)
_ = F0_model.train()
return F0_model
def load_ASR_models(ASR_MODEL_PATH, ASR_MODEL_CONFIG):
# load ASR model
def _load_config(path):
with open(path) as f:
config = yaml.safe_load(f)
model_config = config['model_params']
return model_config
def _load_model(model_config, model_path):
model = ASRCNN(**model_config)
params = torch.load(model_path, map_location='cpu')['model']
model.load_state_dict(params)
return model
asr_model_config = _load_config(ASR_MODEL_CONFIG)
asr_model = _load_model(asr_model_config, ASR_MODEL_PATH)
_ = asr_model.train()
return asr_model
def build_model(args, text_aligner, pitch_extractor, bert):
assert args.decoder.type in ['istftnet', 'hifigan'], 'Decoder type unknown'
if args.decoder.type == "istftnet":
decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels,
resblock_kernel_sizes = args.decoder.resblock_kernel_sizes,
upsample_rates = args.decoder.upsample_rates,
upsample_initial_channel=args.decoder.upsample_initial_channel,
resblock_dilation_sizes=args.decoder.resblock_dilation_sizes,
upsample_kernel_sizes=args.decoder.upsample_kernel_sizes,
gen_istft_n_fft=args.decoder.gen_istft_n_fft, gen_istft_hop_size=args.decoder.gen_istft_hop_size)
else:
decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels,
resblock_kernel_sizes = args.decoder.resblock_kernel_sizes,
upsample_rates = args.decoder.upsample_rates,
upsample_initial_channel=args.decoder.upsample_initial_channel,
resblock_dilation_sizes=args.decoder.resblock_dilation_sizes,
upsample_kernel_sizes=args.decoder.upsample_kernel_sizes)
text_encoder = TextEncoder(channels=args.hidden_dim, kernel_size=5, depth=args.n_layer, n_symbols=args.n_token)
predictor = ProsodyPredictor(style_dim=args.style_dim, d_hid=args.hidden_dim, nlayers=args.n_layer, max_dur=args.max_dur, dropout=args.dropout)
style_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim) # acoustic style encoder
predictor_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim) # prosodic style encoder
# define diffusion model
if args.multispeaker:
transformer = StyleTransformer1d(channels=args.style_dim*2,
context_embedding_features=bert.config.hidden_size,
context_features=args.style_dim*2,
**args.diffusion.transformer)
else:
transformer = Transformer1d(channels=args.style_dim*2,
context_embedding_features=bert.config.hidden_size,
**args.diffusion.transformer)
diffusion = AudioDiffusionConditional(
in_channels=1,
embedding_max_length=bert.config.max_position_embeddings,
embedding_features=bert.config.hidden_size,
embedding_mask_proba=args.diffusion.embedding_mask_proba, # Conditional dropout of batch elements,
channels=args.style_dim*2,
context_features=args.style_dim*2,
)
diffusion.diffusion = KDiffusion(
net=diffusion.unet,
sigma_distribution=LogNormalDistribution(mean = args.diffusion.dist.mean, std = args.diffusion.dist.std),
sigma_data=args.diffusion.dist.sigma_data, # a placeholder, will be changed dynamically when start training diffusion model
dynamic_threshold=0.0
)
diffusion.diffusion.net = transformer
diffusion.unet = transformer
nets = Munch(
bert=bert,
bert_encoder=nn.Linear(bert.config.hidden_size, args.hidden_dim),
predictor=predictor,
decoder=decoder,
text_encoder=text_encoder,
predictor_encoder=predictor_encoder,
style_encoder=style_encoder,
diffusion=diffusion,
text_aligner = text_aligner,
pitch_extractor=pitch_extractor,
| #coding:utf-8
class LearnedDownSample(nn.Module):
def __init__(self, layer_type, dim_in):
super().__init__()
self.layer_type = layer_type
if self.layer_type == 'none':
self.conv = nn.Identity()
elif self.layer_type == 'timepreserve':
self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, padding=(1, 0)))
elif self.layer_type == 'half':
self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, padding=1))
else:
raise RuntimeError('Got unexpected downsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
def forward(self, x):
return self.conv(x)
class LearnedUpSample(nn.Module):
def __init__(self, layer_type, dim_in):
super().__init__()
self.layer_type = layer_type
if self.layer_type == 'none':
self.conv = nn.Identity()
elif self.layer_type == 'timepreserve':
self.conv = nn.ConvTranspose2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, output_padding=(1, 0), padding=(1, 0))
elif self.layer_type == 'half':
self.conv = nn.ConvTranspose2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, output_padding=1, padding=1)
else:
raise RuntimeError('Got unexpected upsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
def forward(self, x):
return self.conv(x)
class DownSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.avg_pool2d(x, (2, 1))
elif self.layer_type == 'half':
if x.shape[-1] % 2 != 0:
x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1)
return F.avg_pool2d(x, 2)
else:
raise RuntimeError('Got unexpected downsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
class UpSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.interpolate(x, scale_factor=(2, 1), mode='nearest')
elif self.layer_type == 'half':
return F.interpolate(x, scale_factor=2, mode='nearest')
else:
raise RuntimeError('Got unexpected upsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
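# --- Editorial note (illustrative sketch, not part of the original StyleTTS2 code) ------------
# The 'none' / 'timepreserve' / 'half' modes above follow a simple convention on
# [batch, channels, freq, time] feature maps: 'timepreserve' rescales only the frequency axis,
# while 'half' rescales both frequency and time. A minimal shape check using the DownSample
# class defined above (all shapes here are assumptions chosen for the example):
def _editorial_downsample_shape_demo():
    """Illustrative only: output shapes of the two pooling modes."""
    x = torch.randn(1, 4, 8, 10)                # [B, C, F, T]
    freq_only = DownSample('timepreserve')(x)   # -> torch.Size([1, 4, 4, 10])
    both = DownSample('half')(x)                # -> torch.Size([1, 4, 4, 5])
    return freq_only.shape, both.shape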
class ResBlk(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
normalize=False, downsample='none'):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = DownSample(downsample)
self.downsample_res = LearnedDownSample(downsample, dim_in)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = spectral_norm(nn.Conv2d(dim_in, dim_in, 3, 1, 1))
self.conv2 = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1))
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = spectral_norm(nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False))
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
x = self.downsample_res(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2) # unit variance
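# --- Editorial note (not part of the original StyleTTS2 code) ---------------------------------
# The block output is (shortcut + residual) / sqrt(2): if the two branches are roughly
# unit-variance and uncorrelated, their sum has variance of about 2, so the 1/sqrt(2) factor
# keeps activations near unit variance as blocks are stacked. Quick illustrative check:
#   a, b = torch.randn(100000), torch.randn(100000)
#   ((a + b) / math.sqrt(2)).var()   # close to 1.0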
class StyleEncoder(nn.Module):
def __init__(self, dim_in=48, style_dim=48, max_conv_dim=384):
super().__init__()
blocks = []
blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))]
repeat_num = 4
for _ in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [nn.LeakyReLU(0.2)]
self.shared = nn.Sequential(*blocks)
self.unshared = nn.Linear(dim_out, style_dim)
def forward(self, x):
h = self.shared(x)
h = h.view(h.size(0), -1)
s = self.unshared(h)
return s
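# --- Editorial usage sketch (not part of the original StyleTTS2 code) -------------------------
# StyleEncoder maps a single-channel spectrogram-like input to one style vector per utterance.
# The dimensions below (80 mel bins, 120 frames, dim_in=64, style_dim=128) are assumptions for
# the example, not values taken from the original training config:
def _editorial_style_encoder_demo():
    enc = StyleEncoder(dim_in=64, style_dim=128, max_conv_dim=512)
    mel = torch.randn(2, 1, 80, 120)   # [B, 1, n_mels, T]
    s = enc(mel)                       # -> [B, style_dim] == [2, 128]
    return s.shape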
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class Discriminator2d(nn.Module):
def __init__(self, dim_in=48, num_domains=1, max_conv_dim=384, repeat_num=4):
super().__init__()
blocks = []
blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))]
for lid in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))]
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [spectral_norm(nn.Conv2d(dim_out, num_domains, 1, 1, 0))]
self.main = nn.Sequential(*blocks)
def get_feature(self, x):
features = []
for l in self.main:
x = l(x)
features.append(x)
out = features[-1]
out = out.view(out.size(0), -1) # (batch, num_domains)
return out, features
def forward(self, x):
out, features = self.get_feature(x)
out = out.squeeze() # (batch)
return out, features
class ResBlk1d(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
normalize=False, downsample='none', dropout_p=0.2):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample_type = downsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
self.dropout_p = dropout_p
if self.downsample_type == 'none':
self.pool = nn.Identity()
else:
self.pool = weight_norm(nn.Conv1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1))
def _build_weights(self, dim_in, dim_out):
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_in, 3, 1, 1))
self.conv2 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
if self.normalize:
self.norm1 = nn.InstanceNorm1d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm1d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))
def downsample(self, x):
if self.downsample_type == 'none':
return x
else:
if x.shape[-1] % 2 != 0:
x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1)
return F.avg_pool1d(x, 2)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = F.dropout(x, p=self.dropout_p, training=self.training)
x = self.conv1(x)
x = self.pool(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = F.dropout(x, p=self.dropout_p, training=self.training)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2) # unit variance
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class TextEncoder(nn.Module):
def __init__(self, channels, kernel_size, depth, n_symbols, actv=nn.LeakyReLU(0.2)):
super().__init__()
self.embedding = nn.Embedding(n_symbols, channels)
padding = (kernel_size - 1) // 2
self.cnn = nn.ModuleList()
for _ in range(depth):
self.cnn.append(nn.Sequential(
weight_norm(nn.Conv1d(channels, channels, kernel_size=kernel_size, padding=padding)),
LayerNorm(channels),
actv,
nn.Dropout(0.2),
))
# self.cnn = nn.Sequential(*self.cnn)
self.lstm = nn.LSTM(channels, channels//2, 1, batch_first=True, bidirectional=True)
def forward(self, x, input_lengths, m):
x = self.embedding(x) # [B, T, emb]
x = x.transpose(1, 2) # [B, emb, T]
m = m.to(input_lengths.device).unsqueeze(1)
x.masked_fill_(m, 0.0)
for c in self.cnn:
x = c(x)
x.masked_fill_(m, 0.0)
x = x.transpose(1, 2) # [B, T, chn]
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True, enforce_sorted=False)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x = x.transpose(-1, -2)
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
x_pad[:, :, :x.shape[-1]] = x
x = x_pad.to(x.device)
x.masked_fill_(m, 0.0)
return x
def inference(self, x):
x = self.embedding(x)
x = x.transpose(1, 2)
for c in self.cnn:  # self.cnn is an nn.ModuleList (not callable), so apply each conv block in turn
    x = c(x)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
return x
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
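# --- Editorial note (not part of the original StyleTTS2 code) ---------------------------------
# length_to_mask marks *padded* positions with True, so masked_fill_(m, 0.0) zeroes every frame
# beyond a sequence's true length. For lengths = tensor([2, 4]) the mask is:
#   tensor([[False, False,  True,  True],
#           [False, False, False, False]])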
class AdaIN1d(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm1d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features*2)
def forward(self, x, s):
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
return (1 + gamma) * self.norm(x) + beta
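# --- Editorial note (illustrative sketch, not part of the original StyleTTS2 code) ------------
# AdaIN1d implements adaptive instance normalization: the style vector s is projected to a
# per-channel (gamma, beta) pair and the instance-normalized content is modulated as
#     out = (1 + gamma(s)) * InstanceNorm(x) + beta(s)
# Minimal illustrative call (shapes are assumptions, not taken from the original config):
def _editorial_adain1d_demo():
    adain = AdaIN1d(style_dim=64, num_features=256)
    x = torch.randn(2, 256, 50)   # [B, C, T] content features
    s = torch.randn(2, 64)        # [B, style_dim] style vector
    return adain(x, s).shape      # -> torch.Size([2, 256, 50])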
class UpSample1d(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
else:
return F.interpolate(x, scale_factor=2, mode='nearest')
class AdainResBlk1d(nn.Module):
def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2),
upsample='none', dropout_p=0.0):
super().__init__()
self.actv = actv
self.upsample_type = upsample
self.upsample = UpSample1d(upsample)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out, style_dim)
self.dropout = nn.Dropout(dropout_p)
if upsample == 'none':
self.pool = nn.Identity()
else:
self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1))
def _build_weights(self, dim_in, dim_out, style_dim):
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
self.norm1 = AdaIN1d(style_dim, dim_in)
self.norm2 = AdaIN1d(style_dim, dim_out)
if self.learned_sc:
self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))
def _shortcut(self, x):
x = self.upsample(x)
if self.learned_sc:
x = self.conv1x1(x)
return x
def _residual(self, x, s):
x = self.norm1(x, s)
x = self.actv(x)
x = self.pool(x)
x = self.conv1(self.dropout(x))
x = self.norm2(x, s)
x = self.actv(x)
x = self.conv2(self.dropout(x))
return x
def forward(self, x, s):
out = self._residual(x, s)
out = (out + self._shortcut(x)) / math.sqrt(2)
return out
class AdaLayerNorm(nn.Module):
def __init__(self, style_dim, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.fc = nn.Linear(style_dim, channels*2)
def forward(self, x, s):
x = x.transpose(-1, -2)
x = x.transpose(1, -1)
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), eps=self.eps)
x = (1 + gamma) * x + beta
return x.transpose(1, -1).transpose(-1, -2)
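# --- Editorial note (not part of the original StyleTTS2 code) ---------------------------------
# AdaLayerNorm is the layer-norm counterpart of AdaIN1d above: the input is layer-normalized
# over its last (channel) dimension and then modulated with style-predicted parameters,
#     out = (1 + gamma(s)) * LayerNorm(x) + beta(s),
# returning a tensor of the same shape as the input.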
class ProsodyPredictor(nn.Module):
def __init__(self, style_dim, d_hid, nlayers, max_dur=50, dropout=0.1):
super().__init__()
self.text_encoder = DurationEncoder(sty_dim=style_dim,
d_model=d_hid,
nlayers=nlayers,
dropout=dropout)
self.lstm = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
self.duration_proj = LinearNorm(d_hid, max_dur)
self.shared = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
self.F0 = nn.ModuleList()
self.F0.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
self.F0.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
self.F0.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
self.N = nn.ModuleList()
self.N.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
self.N.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
self.N.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
self.F0_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
self.N_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
def forward(self, texts, style, text_lengths, alignment, m):
d = self.text_encoder(texts, style, text_lengths, m)
batch_size = d.shape[0]
text_size = d.shape[1]
# predict duration
input_lengths = text_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
d, input_lengths, batch_first=True, enforce_sorted=False)
m = m.to(text_lengths.device).unsqueeze(1)
self.lstm.flatten_parameters()
x, _ = self.lstm(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x_pad = torch.zeros([x.shape[0], m.shape[-1], x.shape[-1]])
x_pad[:, :x.shape[1], :] = x
x = x_pad.to(x.device)
duration = self.duration_proj(nn.functional.dropout(x, 0.5, training=self.training))
en = (d.transpose(-1, -2) @ alignment)
return duration.squeeze(-1), en
def F0Ntrain(self, x, s):
x, _ = self.shared(x.transpose(-1, -2))
F0 = x.transpose(-1, -2)
for block in self.F0:
F0 = block(F0, s)
F0 = self.F0_proj(F0)
N = x.transpose(-1, -2)
for block in self.N:
N = block(N, s)
N = self.N_proj(N)
return F0.squeeze(1), N.squeeze(1)
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
class DurationEncoder(nn.Module):
def __init__(self, sty_dim, d_model, nlayers, dropout=0.1):
super().__init__()
self.lstms = nn.ModuleList()
for _ in range(nlayers):
self.lstms.append(nn.LSTM(d_model + sty_dim,
d_model // 2,
num_layers=1,
batch_first=True,
bidirectional=True,
dropout=dropout))
self.lstms.append(AdaLayerNorm(sty_dim, d_model))
self.dropout = dropout
self.d_model = d_model
self.sty_dim = sty_dim
def forward(self, x, style, text_lengths, m):
masks = m.to(text_lengths.device)
x = x.permute(2, 0, 1)
s = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, s], axis=-1)
x.masked_fill_(masks.unsqueeze(-1).transpose(0, 1), 0.0)
x = x.transpose(0, 1)
input_lengths = text_lengths.cpu().numpy()
x = x.transpose(-1, -2)
for block in self.lstms:
if isinstance(block, AdaLayerNorm):
x = block(x.transpose(-1, -2), style).transpose(-1, -2)
x = torch.cat([x, s.permute(1, -1, 0)], axis=1)
x.masked_fill_(masks.unsqueeze(-1).transpose(-1, -2), 0.0)
else:
x = x.transpose(-1, -2)
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True, enforce_sorted=False)
block.flatten_parameters()
x, _ = block(x)
x, _ = nn.utils.rnn.pad_packed_sequence(
x, batch_first=True)
x = F.dropout(x, p=self.dropout, training=self.training)
x = x.transpose(-1, -2)
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
x_pad[:, :, :x.shape[-1]] = x
x = x_pad.to(x.device)
return x.transpose(-1, -2)
def inference(self, x, style):
x = self.embedding(x.transpose(-1, -2)) * math.sqrt(self.d_model)
style = style.expand(x.shape[0], x.shape[1], -1)
x = torch.cat([x, style], axis=-1)
src = self.pos_encoder(x)
output = self.transformer_encoder(src).transpose(0, 1)
return output
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
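# --- Editorial note (not part of the original StyleTTS2 code) ---------------------------------
# DurationEncoder.forward returns style-augmented text features of shape
# [batch, T, d_model + sty_dim] (the style vector is re-concatenated after every AdaLayerNorm
# block), which is why ProsodyPredictor.lstm and ProsodyPredictor.shared above take
# d_hid + style_dim inputs. The inference() method here references self.embedding,
# self.pos_encoder and self.transformer_encoder, none of which are defined in this class, so it
# appears to be leftover from an earlier transformer-based variant and is unused by forward().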
def load_F0_models(path):
# load F0 model
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(path, map_location='cpu')['net']
F0_model.load_state_dict(params)
_ = F0_model.train()
return F0_model
def load_ASR_models(ASR_MODEL_PATH, ASR_MODEL_CONFIG):
# load ASR model
def _load_config(path):
with open(path) as f:
config = yaml.safe_load(f)
model_config = config['model_params']
return model_config
def _load_model(model_config, model_path):
model = ASRCNN(**model_config)
params = torch.load(model_path, map_location='cpu')['model']
model.load_state_dict(params)
return model
asr_model_config = _load_config(ASR_MODEL_CONFIG)
asr_model = _load_model(asr_model_config, ASR_MODEL_PATH)
_ = asr_model.train()
return asr_model
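# --- Editorial usage sketch (not part of the original StyleTTS2 code) -------------------------
# The two loaders above are typically called with paths taken from the training config before
# build_model(); the file names below are placeholders for the example, not paths guaranteed to
# ship with this repository:
def _editorial_load_aux_models_demo():
    F0_model = load_F0_models("Utils/JDC/bst.t7")                 # hypothetical checkpoint path
    asr_model = load_ASR_models("Utils/ASR/epoch_00080.pth",      # hypothetical checkpoint path
                                "Utils/ASR/config.yml")           # hypothetical config path
    return F0_model, asr_model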
def build_model(args, text_aligner, pitch_extractor, bert):
assert args.decoder.type in ['istftnet', 'hifigan'], 'Decoder type unknown'
if args.decoder.type == "istftnet":
decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels,
resblock_kernel_sizes = args.decoder.resblock_kernel_sizes,
upsample_rates = args.decoder.upsample_rates,
upsample_initial_channel=args.decoder.upsample_initial_channel,
resblock_dilation_sizes=args.decoder.resblock_dilation_sizes,
upsample_kernel_sizes=args.decoder.upsample_kernel_sizes,
gen_istft_n_fft=args.decoder.gen_istft_n_fft, gen_istft_hop_size=args.decoder.gen_istft_hop_size)
else:
decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels,
resblock_kernel_sizes = args.decoder.resblock_kernel_sizes,
upsample_rates = args.decoder.upsample_rates,
upsample_initial_channel=args.decoder.upsample_initial_channel,
resblock_dilation_sizes=args.decoder.resblock_dilation_sizes,
upsample_kernel_sizes=args.decoder.upsample_kernel_sizes)
text_encoder = TextEncoder(channels=args.hidden_dim, kernel_size=5, depth=args.n_layer, n_symbols=args.n_token)
predictor = ProsodyPredictor(style_dim=args.style_dim, d_hid=args.hidden_dim, nlayers=args.n_layer, max_dur=args.max_dur, dropout=args.dropout)
style_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim) # acoustic style encoder
predictor_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim) # prosodic style encoder
# define diffusion model
if args.multispeaker:
transformer = StyleTransformer1d(channels=args.style_dim*2,
context_embedding_features=bert.config.hidden_size,
context_features=args.style_dim*2,
**args.diffusion.transformer)
else:
transformer = Transformer1d(channels=args.style_dim*2,
context_embedding_features=bert.config.hidden_size,
**args.diffusion.transformer)
diffusion = AudioDiffusionConditional(
in_channels=1,
embedding_max_length=bert.config.max_position_embeddings,
embedding_features=bert.config.hidden_size,
embedding_mask_proba=args.diffusion.embedding_mask_proba, # Conditional dropout of batch elements,
channels=args.style_dim*2,
context_features=args.style_dim*2,
)
diffusion.diffusion = KDiffusion(
net=diffusion.unet,
sigma_distribution=LogNormalDistribution(mean = args.diffusion.dist.mean, std = args.diffusion.dist.std),
sigma_data=args.diffusion.dist.sigma_data, # a placeholder, will be changed dynamically when start training diffusion model
dynamic_threshold=0.0
)
diffusion.diffusion.net = transformer
diffusion.unet = transformer
nets = Munch(
bert=bert,
bert_encoder=nn.Linear(bert.config.hidden_size, args.hidden_dim),
predictor=predictor,
decoder=decoder,
text_encoder=text_encoder,
predictor_encoder=predictor_encoder,
style_encoder=style_encoder,
diffusion=diffusion,
text_aligner = text_aligner,
pitch_extractor=pitch_extractor,
| mpd = MultiPeriodDiscriminator(), | 7 | 2023-12-15 10:04:21+00:00 | 12k |
alibaba/u2mot | yolox/models/yolox.py | [
{
"identifier": "YOLOXHead",
"path": "yolox/models/yolo_head.py",
"snippet": "class YOLOXHead(nn.Module):\n def __init__(\n self,\n num_classes,\n width=1.0,\n strides=[8, 16, 32],\n in_channels=[256, 512, 1024],\n act=\"silu\",\n depthwise=False,\n ):\n \"\"\"\n Args:\n act (str): activation type of conv. Defalut value: \"silu\".\n depthwise (bool): wheather apply depthwise conv in conv branch. Defalut value: False.\n \"\"\"\n super().__init__()\n\n self.n_anchors = 1\n self.num_classes = num_classes\n self.decode_in_inference = True # for deploy, set to False\n\n self.cls_convs = nn.ModuleList() # cls conv layer\n self.reg_convs = nn.ModuleList() # reg conv layer\n self.cls_preds = nn.ModuleList() # cls pred layer\n self.reg_preds = nn.ModuleList() # reg pred layer\n self.obj_preds = nn.ModuleList() # obj pred layer\n self.stems = nn.ModuleList() # stems\n\n # TODO: reid head\n self.reid_convs = nn.ModuleList() # cls conv layer\n self.reid_preds = nn.ModuleList() # cls pred layer\n self.emb_dim = 128 # dimension of reid embedding\n # self.s_det = nn.Parameter(-1.85 * torch.ones(1)) # TODO: For Uncertainty loss\n # self.s_id = nn.Parameter(-1.05 * torch.ones(1)) # TODO: For Uncertainty loss\n # self.settings = {}\n self.s_bbox = 1.0\n self.s_reid = 0.5\n self.s_moco = 0.5\n\n Conv = DWConv if depthwise else BaseConv\n\n for i in range(len(in_channels)): # iteration over levels of output features\n self.stems.append( # 1 BaseConv layer\n BaseConv(\n in_channels=int(in_channels[i] * width),\n out_channels=int(256 * width),\n ksize=1,\n stride=1,\n act=act,\n )\n )\n self.cls_convs.append(\n nn.Sequential( # 2 BaseConv layers\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n self.reg_convs.append(\n nn.Sequential( # 2 BaseConv layers\n *[\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n Conv(\n in_channels=int(256 * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n ]\n )\n )\n self.cls_preds.append( # 1 Conv2d layer, output channel is 'self.n_anchors * self.num_classes'\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=self.n_anchors * self.num_classes,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.reg_preds.append( # 1 Conv2d layer, output channel is '4'\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=4,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n self.obj_preds.append( # 1 Conv2d layer, output channel is 'self.n_anchors * 1'\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=self.n_anchors * 1,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n )\n\n # TODO: reid head: reid_convs (2 * 3x3 Conv) + reid_preds (1 * 1x1 Conv)\n if i == 0:\n self.reid_convs.append(\n Conv(\n in_channels=int(in_channels[i] * width),\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n )\n )\n elif i == 1:\n self.reid_convs.append(\n Upsample(\n in_channels=int(in_channels[i] * width),\n out_channels=int(256 * width),\n gain=2,\n )\n )\n elif i == 2:\n self.reid_convs.append(\n nn.Sequential( # 2 Upsample layers\n *[\n Upsample(\n in_channels=int(in_channels[i] * width),\n out_channels=int(in_channels[i] * width / 2),\n gain=2,\n ),\n Upsample(\n in_channels=int(in_channels[i] * width / 2),\n out_channels=int(256 * width),\n gain=2,\n ),\n ]\n )\n )\n # self.reid_preds = nn.Conv2d(\n # in_channels=int(256 * width) * 3,\n # 
out_channels=self.emb_dim,\n # kernel_size=1,\n # stride=1,\n # padding=0,\n # )\n self.reid_preds = nn.Sequential( # 2 BaseConv layers\n *[\n Conv(\n in_channels=int(256 * width) * 3,\n out_channels=int(256 * width),\n ksize=3,\n stride=1,\n act=act,\n ),\n nn.Conv2d(\n in_channels=int(256 * width),\n out_channels=self.emb_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n ]\n )\n\n self.use_l1 = False\n self.l1_loss = nn.L1Loss(reduction=\"none\")\n self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction=\"none\")\n self.iou_loss = IOUloss(reduction=\"none\")\n self.strides = strides\n self.grids = [torch.zeros(1)] * len(in_channels)\n self.expanded_strides = [None] * len(in_channels)\n\n def initialize_biases(self, prior_prob):\n for conv in self.cls_preds:\n b = conv.bias.view(self.n_anchors, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n for conv in self.obj_preds:\n b = conv.bias.view(self.n_anchors, -1)\n b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def forward(self, xin, labels=None, imgs=None, context=None, freeze_detector=False): # xin: 256/8, 512/16, 1024/32\n outputs = []\n origin_preds = []\n x_shifts = []\n y_shifts = []\n expanded_strides = []\n reid_x = []\n '''iteration over levels of output feature map: 256/8, 512/16, 1024/32'''\n for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(\n zip(self.cls_convs, self.reg_convs, self.strides, xin)\n ):\n reid_x.append(self.reid_convs[k](x)) # TODO: reid branch\n\n ctx = torch.no_grad() if freeze_detector else contextlib.nullcontext()\n with ctx:\n x = self.stems[k](x)\n cls_x = x\n reg_x = x\n\n cls_feat = cls_conv(cls_x) # cls_conv: 256/8, 512/16, 1024/32\n cls_output = self.cls_preds[k](cls_feat) # cls_preds: [batchsize, clsss_num, H/8, W/8]/8, /16, /32\n\n reg_feat = reg_conv(reg_x) # reg_conv: 256/8, 512/16, 1024/32\n reg_output = self.reg_preds[k](reg_feat) # reg_preds: [batchsize, 4, W/8, H/8]/8, /16, /32\n obj_output = self.obj_preds[k](reg_feat) # obj_preds: [batchsize, 1, W/8, H/8]/8, /16, /32\n\n if self.training and context is not None:\n output = torch.cat([reg_output, obj_output, cls_output], 1) # cat order: reg(4), obj(1), cls(1)\n output, grid = self.get_output_and_grid( # [1, 6, H/s, W/s] ==> [1, 6, H/s, W/s]\n output, k, stride_this_level, xin[0].type()\n )\n x_shifts.append(grid[:, :, 0]) # list, H/s loops of range(W/s)\n y_shifts.append(grid[:, :, 1]) # list, W/s loops of range(H/s)\n expanded_strides.append(\n torch.zeros(1, grid.shape[1])\n .fill_(stride_this_level)\n .type_as(xin[0])\n )\n if self.use_l1:\n batch_size = reg_output.shape[0]\n hsize, wsize = reg_output.shape[-2:]\n reg_output = reg_output.view(\n batch_size, self.n_anchors, 4, hsize, wsize\n )\n reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(\n batch_size, -1, 4\n )\n origin_preds.append(reg_output.clone())\n\n else:\n output = torch.cat(\n [reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1 # obj_output, cls_output use sigmoid\n )\n\n outputs.append(output)\n \n reid_feat = self.reid_preds(torch.cat(reid_x, dim=1))\n reid_feat = F.normalize(reid_feat, dim=1).permute(0, 2, 3, 1) # reid_preds: [batchsize, H/8, W/8, 128]\n\n if self.training and context is not None:\n moco, backbone, infos, x_aug, x_prev, targets_aug, targets_prev = context\n moco._momentum_update_key_encoder(backbone, self)\n reid_feat_aug, reid_feat_prev = [moco.extract_feat(inp) for inp in 
[x_aug, x_prev]]\n context = [moco, infos, reid_feat, reid_feat_aug, reid_feat_prev, labels.clone(), targets_aug, targets_prev]\n\n return self.get_losses(\n imgs,\n x_shifts,\n y_shifts,\n expanded_strides,\n labels,\n torch.cat(outputs, 1),\n origin_preds,\n dtype=xin[0].dtype,\n context=context\n )\n else:\n self.hw = [x.shape[-2:] for x in outputs]\n # [batch, n_anchors_all, 85]\n outputs = torch.cat(\n [x.flatten(start_dim=2) for x in outputs], dim=2\n ).permute(0, 2, 1)\n if self.decode_in_inference:\n return self.decode_outputs(outputs, dtype=xin[0].type()), reid_feat\n else:\n return outputs, reid_feat\n\n def get_output_and_grid(self, output, k, stride, dtype):\n grid = self.grids[k]\n\n batch_size = output.shape[0]\n n_ch = 5 + self.num_classes # number of channel, reg(4) + obj(1) + cls\n hsize, wsize = output.shape[-2:]\n if grid.shape[2:4] != output.shape[2:4]:\n yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype) # [1, 1, H/s, W/s, 2]\n self.grids[k] = grid\n\n output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize) # [batchsize, 5+n_cls, H/s, W/s] --> [batchsize, n_anchors, 5+n_cls, H/s, W/s]\n output = output.permute(0, 1, 3, 4, 2).reshape( # [batchsize, n_anchors, 5+n_cls, H/s, W/s] --> [bs, n_anchors * H/s* W/s, 5+n_cls]\n batch_size, self.n_anchors * hsize * wsize, -1\n )\n grid = grid.view(1, -1, 2) # [1, 1, H/s, W/s, 2] --> [1, H/s * W/s, 2]\n output[..., :2] = (output[..., :2] + grid) * stride # offset w.r.t top-left corner coordinate of grid cell\n output[..., 2:4] = torch.exp(output[..., 2:4]) * stride # w, h\n return output, grid\n\n def decode_outputs(self, outputs, dtype):\n grids = []\n strides = []\n for (hsize, wsize), stride in zip(self.hw, self.strides):\n yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])\n grid = torch.stack((xv, yv), 2).view(1, -1, 2)\n grids.append(grid)\n shape = grid.shape[:2]\n strides.append(torch.full((*shape, 1), stride))\n\n grids = torch.cat(grids, dim=1).type(dtype)\n strides = torch.cat(strides, dim=1).type(dtype)\n\n outputs[..., :2] = (outputs[..., :2] + grids) * strides\n outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides\n return outputs\n\n def get_losses(\n self,\n imgs,\n x_shifts,\n y_shifts,\n expanded_strides,\n labels,\n outputs,\n origin_preds,\n dtype,\n context\n ):\n # get preds from outputs\n bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]\n obj_preds = outputs[:, :, 4].unsqueeze(-1) # [batch, n_anchors_all, 1]\n cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]\n\n # calculate targets\n mixup = labels.shape[2] > 5\n if mixup:\n label_cut = labels[..., :5]\n else:\n label_cut = labels\n nlabel = (label_cut.sum(dim=2) > 0).sum(dim=1) # number of objects\n\n total_num_anchors = outputs.shape[1]\n x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]\n y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]\n expanded_strides = torch.cat(expanded_strides, 1) # [1, n_anchors_all]\n if self.use_l1:\n origin_preds = torch.cat(origin_preds, 1)\n\n cls_targets = []\n reg_targets = []\n l1_targets = []\n obj_targets = []\n fg_masks = []\n\n num_fg = 0.0\n num_gts = 0.0\n\n for batch_idx in range(outputs.shape[0]):\n num_gt = int(nlabel[batch_idx])\n num_gts += num_gt\n if num_gt == 0:\n cls_target = outputs.new_zeros((0, self.num_classes)) # [matched_anchor, class_number]\n reg_target = outputs.new_zeros((0, 4)) # [matched_anchor, 4]\n l1_target = outputs.new_zeros((0, 4)) 
# [n_anchors, 1]\n obj_target = outputs.new_zeros((total_num_anchors, 1))\n fg_mask = outputs.new_zeros(total_num_anchors).bool()\n else:\n # get target for loss, 'per_image' is used for assignment\n gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5] # [matched_anchor, 4]\n gt_classes = labels[batch_idx, :num_gt, 0] # [matched_anchor]\n\n bboxes_preds_per_image = bbox_preds[batch_idx] # [n_anchors_all, 4]\n\n # assignment between gts and anchors (e.g. positive anchors to optimize)\n try:\n (\n gt_matched_classes, # [matched_anchor], class of matched anchors\n fg_mask, # [n_anchors], .sum()=matched_anchor, to mask out unmatched anchors\n pred_ious_this_matching, # [matched_anchor], IoU of matched anchors\n matched_gt_inds, # [matched_anchor], index of gts for each matched anchor\n num_fg_img, # [1], matched_anchor\n ) = self.get_assignments( # noqa\n batch_idx,\n num_gt,\n total_num_anchors,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n bbox_preds,\n obj_preds,\n labels,\n imgs,\n )\n except RuntimeError:\n logger.info(\n \"OOM RuntimeError is raised due to the huge memory cost during label assignment. \\\n CPU mode is applied in this batch. If you want to avoid this issue, \\\n try to reduce the batch size or image size.\"\n )\n print(\"OOM RuntimeError is raised due to the huge memory cost during label assignment. \\\n CPU mode is applied in this batch. If you want to avoid this issue, \\\n try to reduce the batch size or image size.\")\n torch.cuda.empty_cache()\n (\n gt_matched_classes, # [matched_anchor], class of matched anchors\n fg_mask, # [n_anchors], .sum()=matched_anchor, to mask out unmatched anchors\n pred_ious_this_matching, # [matched_anchor], IoU of matched anchors\n matched_gt_inds, # [matched_anchor], index of gts for each matched anchor\n num_fg_img, # [1], matched_anchor\n ) = self.get_assignments(\n batch_idx,\n num_gt,\n total_num_anchors,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n bbox_preds,\n obj_preds,\n labels,\n imgs,\n \"cpu\",\n )\n \n \n torch.cuda.empty_cache()\n num_fg += num_fg_img\n\n # get target for optimization. 
Because of multiple optisive strategy, each gt has multiple anchors to optimize\n # so the number of targets is matched_anchor, other than number of gt\n cls_target = F.one_hot( # https://github.com/Megvii-BaseDetection/YOLOX/issues/949\n gt_matched_classes.to(torch.int64), self.num_classes\n ) * pred_ious_this_matching.unsqueeze(-1) # [matched_anchor, class_number]\n # We would like to encode the iou information into the target, to relieve the misalignment\n # between the classification and regression prediction.\n obj_target = fg_mask.unsqueeze(-1) # [n_anchors] --> [n_anchors, 1]\n reg_target = gt_bboxes_per_image[matched_gt_inds] # [matched_anchor, 4]\n\n if self.use_l1:\n l1_target = self.get_l1_target(\n outputs.new_zeros((num_fg_img, 4)),\n gt_bboxes_per_image[matched_gt_inds],\n expanded_strides[0][fg_mask],\n x_shifts=x_shifts[0][fg_mask],\n y_shifts=y_shifts[0][fg_mask],\n )\n\n cls_targets.append(cls_target) # cls target\n reg_targets.append(reg_target) # reg target\n obj_targets.append(obj_target.to(dtype)) # obj target\n fg_masks.append(fg_mask) # fg_mask\n if self.use_l1:\n l1_targets.append(l1_target)\n\n cls_targets = torch.cat(cls_targets, 0) # [matched_anchor, 1]\n reg_targets = torch.cat(reg_targets, 0) # [matched_anchor, 4]\n obj_targets = torch.cat(obj_targets, 0) # [all_anchor, 1]\n fg_masks = torch.cat(fg_masks, 0) # [all_anchor]\n if self.use_l1:\n l1_targets = torch.cat(l1_targets, 0)\n\n # compute loss\n num_fg = max(num_fg, 1)\n loss_iou = (\n self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets) # [matched_anchor, 4]\n ).sum() / num_fg\n loss_obj = (\n self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets) # [all_anchor, 1]\n ).sum() / num_fg\n loss_cls = (\n self.bcewithlog_loss(\n cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets # [matched_anchor, 1]\n )\n ).sum() / num_fg\n # TODO: ReID. compute id loss\n loss_id, loss_id_aux = context[0].compute_loss(context[1:])\n\n if self.use_l1:\n loss_l1 = (\n self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets)\n ).sum() / num_fg\n else:\n loss_l1 = 0.0\n\n reg_weight = 5.0\n\n # loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1 # TODO: original loss (only detection)\n\n det_loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1\n # id_loss = loss_id\n # TODO: ReID. Uncertainty Loss\n # print(\"self.s_det:\", self.s_det, \"self.s_id:\", self.s_id) # for debug (0114)\n # loss = torch.exp(-self.s_det) * det_loss + torch.exp(-self.s_id) * id_loss + (self.s_det + self.s_id)\n # loss *= 0.5\n # self.settings.update({'s_det': self.s_det, 's_id': self.s_id})\n id_loss = loss_id * self.s_moco + loss_id_aux * (1. - self.s_moco)\n loss = self.s_bbox * det_loss + self.s_reid * id_loss\n\n\n return (\n loss,\n reg_weight * loss_iou,\n loss_obj,\n loss_cls,\n loss_id, # TODO: ReID. return id loss\n loss_id_aux, # TODO: ReID. 
return id loss\n loss_l1,\n num_fg / max(num_gts, 1),\n # self.settings\n )\n\n def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):\n l1_target[:, 0] = gt[:, 0] / stride - x_shifts\n l1_target[:, 1] = gt[:, 1] / stride - y_shifts\n l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)\n l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)\n return l1_target\n\n @torch.no_grad()\n def get_assignments(\n self,\n batch_idx,\n num_gt,\n total_num_anchors,\n gt_bboxes_per_image,\n gt_classes,\n bboxes_preds_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n cls_preds,\n bbox_preds,\n obj_preds,\n labels,\n imgs,\n mode=\"gpu\",\n ):\n\n if mode == \"cpu\":\n print(\"------------CPU Mode for This Batch-------------\")\n gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()\n bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()\n gt_classes = gt_classes.cpu().float()\n expanded_strides = expanded_strides.cpu().float()\n x_shifts = x_shifts.cpu()\n y_shifts = y_shifts.cpu()\n\n img_size = imgs.shape[2:]\n # fg_mask: [all_anchors]\n # is_in_boxes_and_center: [gt_num, matched_anchors]\n fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(\n gt_bboxes_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n total_num_anchors,\n num_gt,\n img_size\n )\n\n bboxes_preds_per_image = bboxes_preds_per_image[fg_mask] # [matched_anchor, 4]\n cls_preds_ = cls_preds[batch_idx][fg_mask] # [matched_anchor, 1]\n obj_preds_ = obj_preds[batch_idx][fg_mask] # [matched_anchor, 1]\n num_in_boxes_anchor = bboxes_preds_per_image.shape[0] # [1], matched_anchor\n\n if mode == \"cpu\":\n gt_bboxes_per_image = gt_bboxes_per_image.cpu()\n bboxes_preds_per_image = bboxes_preds_per_image.cpu()\n\n pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False) # [gt_num, matched_anchor]\n\n gt_cls_per_image = ( # [gt_num, matched_anchor, class_num]\n F.one_hot(gt_classes.to(torch.int64), self.num_classes)\n .float()\n .unsqueeze(1)\n .repeat(1, num_in_boxes_anchor, 1)\n )\n pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) # [gt_num, matched_anchor]\n\n if mode == \"cpu\":\n cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()\n\n with torch.cuda.amp.autocast(enabled=False):\n cls_preds_ = ( # [gt_num, matched_anchor, 1]\n cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() # [gt_num, matched_anchor, 1]\n * obj_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() # [gt_num, matched_anchor, 1]\n )\n pair_wise_cls_loss = F.binary_cross_entropy( # [gt_num, matched_anchor]\n cls_preds_.sqrt_(), gt_cls_per_image, reduction=\"none\"\n ).sum(-1)\n del cls_preds_\n\n cost = (\n pair_wise_cls_loss\n + 3.0 * pair_wise_ious_loss\n + 100000.0 * (~is_in_boxes_and_center)\n )\n\n (\n num_fg,\n gt_matched_classes, # [k_anchors]\n pred_ious_this_matching, # [k_anchors]\n matched_gt_inds, # [k_anchors]\n ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask) # assignment strategy 3\n del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss\n\n if mode == \"cpu\":\n gt_matched_classes = gt_matched_classes.cuda()\n fg_mask = fg_mask.cuda()\n pred_ious_this_matching = pred_ious_this_matching.cuda()\n matched_gt_inds = matched_gt_inds.cuda()\n\n return (\n gt_matched_classes, # [k_anchors]\n fg_mask, # [all_anchors]\n pred_ious_this_matching, # [k_anchors]\n matched_gt_inds, # [k_anchors]\n num_fg, # k_anchors\n )\n\n def get_in_boxes_info(\n self,\n gt_bboxes_per_image,\n expanded_strides,\n x_shifts,\n y_shifts,\n 
total_num_anchors,\n num_gt,\n img_size\n ):\n \"\"\"assignment strategy 1: anchors whose center is inside corresponding gt_bbox\"\"\"\n expanded_strides_per_image = expanded_strides[0] # [n_anchors_all]\n x_shifts_per_image = x_shifts[0] * expanded_strides_per_image # shift on image\n y_shifts_per_image = y_shifts[0] * expanded_strides_per_image # shift on image\n x_centers_per_image = (\n (x_shifts_per_image + 0.5 * expanded_strides_per_image)\n .unsqueeze(0)\n .repeat(num_gt, 1)\n ) # [n_anchor] -> [n_gt, n_anchor]\n y_centers_per_image = (\n (y_shifts_per_image + 0.5 * expanded_strides_per_image)\n .unsqueeze(0)\n .repeat(num_gt, 1)\n ) # [n_anchor] -> [n_gt, n_anchor]\n # gt_bboxes to tlbr\n gt_bboxes_per_image_l = (\n (gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2])\n .unsqueeze(1)\n .repeat(1, total_num_anchors)\n ) # [n_gt, n_anchor]\n gt_bboxes_per_image_r = (\n (gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2])\n .unsqueeze(1)\n .repeat(1, total_num_anchors)\n ) # [n_gt, n_anchor]\n gt_bboxes_per_image_t = (\n (gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3])\n .unsqueeze(1)\n .repeat(1, total_num_anchors)\n ) # [n_gt, n_anchor]\n gt_bboxes_per_image_b = (\n (gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3])\n .unsqueeze(1)\n .repeat(1, total_num_anchors)\n ) # [n_gt, n_anchor]\n\n b_l = x_centers_per_image - gt_bboxes_per_image_l\n b_r = gt_bboxes_per_image_r - x_centers_per_image\n b_t = y_centers_per_image - gt_bboxes_per_image_t\n b_b = gt_bboxes_per_image_b - y_centers_per_image\n bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)\n\n is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0\n is_in_boxes_all = is_in_boxes.sum(dim=0) > 0\n # in fixed center\n \"\"\"assignment strategy 2: anchors whose center is inside the 5^2 area centered at gt_bbox center\"\"\"\n center_radius = 2.5\n # clip center inside image\n gt_bboxes_per_image_clip = gt_bboxes_per_image[:, 0:2].clone()\n gt_bboxes_per_image_clip[:, 0] = torch.clamp(gt_bboxes_per_image_clip[:, 0], min=0, max=img_size[1])\n gt_bboxes_per_image_clip[:, 1] = torch.clamp(gt_bboxes_per_image_clip[:, 1], min=0, max=img_size[0])\n # tlbr of gt_bboxes\n gt_bboxes_per_image_l = (gt_bboxes_per_image_clip[:, 0]).unsqueeze(1).repeat(\n 1, total_num_anchors\n ) - center_radius * expanded_strides_per_image.unsqueeze(0)\n gt_bboxes_per_image_r = (gt_bboxes_per_image_clip[:, 0]).unsqueeze(1).repeat(\n 1, total_num_anchors\n ) + center_radius * expanded_strides_per_image.unsqueeze(0)\n gt_bboxes_per_image_t = (gt_bboxes_per_image_clip[:, 1]).unsqueeze(1).repeat(\n 1, total_num_anchors\n ) - center_radius * expanded_strides_per_image.unsqueeze(0)\n gt_bboxes_per_image_b = (gt_bboxes_per_image_clip[:, 1]).unsqueeze(1).repeat(\n 1, total_num_anchors\n ) + center_radius * expanded_strides_per_image.unsqueeze(0)\n\n c_l = x_centers_per_image - gt_bboxes_per_image_l\n c_r = gt_bboxes_per_image_r - x_centers_per_image\n c_t = y_centers_per_image - gt_bboxes_per_image_t\n c_b = gt_bboxes_per_image_b - y_centers_per_image\n center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)\n is_in_centers = center_deltas.min(dim=-1).values > 0.0\n is_in_centers_all = is_in_centers.sum(dim=0) > 0\n\n # in boxes and in centers (combine 2 assignment strategy)\n is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all # [n_anchor]\n\n is_in_boxes_and_center = (\n is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor] # [n_labels, n_anchor]\n )\n del gt_bboxes_per_image_clip\n return is_in_boxes_anchor, 
is_in_boxes_and_center\n\n def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):\n # strategy 3: Dynamic K, simplified SimOTA\n # ---------------------------------------------------------------\n matching_matrix = torch.zeros_like(cost)\n\n ious_in_boxes_matrix = pair_wise_ious\n n_candidate_k = min(10, ious_in_boxes_matrix.size(1))\n topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)\n dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(\n cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False\n )\n matching_matrix[gt_idx][pos_idx] = 1.0\n\n del topk_ious, dynamic_ks, pos_idx\n\n anchor_matching_gt = matching_matrix.sum(0)\n if (anchor_matching_gt > 1).sum() > 0:\n cost_min, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)\n matching_matrix[:, anchor_matching_gt > 1] *= 0.0\n matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0\n fg_mask_inboxes = matching_matrix.sum(0) > 0.0\n num_fg = fg_mask_inboxes.sum().item()\n\n fg_mask[fg_mask.clone()] = fg_mask_inboxes\n\n matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n gt_matched_classes = gt_classes[matched_gt_inds] # gt classes for matched anchors\n pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[\n fg_mask_inboxes\n ]\n return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds"
},
{
"identifier": "YOLOPAFPN",
"path": "yolox/models/yolo_pafpn.py",
"snippet": "class YOLOPAFPN(nn.Module):\n \"\"\"\n YOLOv3 model. Darknet 53 is the default backbone of this model.\n \"\"\"\n\n def __init__(\n self,\n depth=1.0,\n width=1.0,\n in_features=(\"dark3\", \"dark4\", \"dark5\"),\n in_channels=[256, 512, 1024],\n depthwise=False,\n act=\"silu\",\n ):\n super().__init__()\n self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)\n self.in_features = in_features # (\"dark3\", \"dark4\", \"dark5\")\n self.in_channels = in_channels # [256, 512, 1024]\n Conv = DWConv if depthwise else BaseConv\n '''\n BaseConv: A Conv2d -> Batchnorm -> silu/leaky relu block\n in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n '''\n '''\n DWConv: Depthwise Conv (with BN and activation) + Pointwise Conv (with BN and activation)\n in_channels, out_channels, ksize, stride=1, act=\"silu\"\n '''\n\n self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n self.lateral_conv0 = BaseConv(\n int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act\n )\n self.C3_p4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n ) # cat\n\n self.reduce_conv1 = BaseConv(\n int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act\n )\n self.C3_p3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[0] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv2 = Conv(\n int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act\n )\n self.C3_n3 = CSPLayer(\n int(2 * in_channels[0] * width),\n int(in_channels[1] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n # bottom-up conv\n self.bu_conv1 = Conv(\n int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act\n )\n self.C3_n4 = CSPLayer(\n int(2 * in_channels[1] * width),\n int(in_channels[2] * width),\n round(3 * depth),\n False,\n depthwise=depthwise,\n act=act,\n )\n\n def forward(self, input):\n \"\"\"\n Args:\n inputs: input images.\n\n Returns:\n Tuple[Tensor]: FPN feature.\n \"\"\"\n\n # backbone\n out_features = self.backbone(input) # input: [1, 3, 64, 64]\n features = [out_features[f] for f in self.in_features] # 'dark3', 'dark4', 'dark5'\n [x2, x1, x0] = features # x2: [1, 320, 8, 8] / 8, x1: [1, 640, 4, 4] / 16, x0: [1, 1280, 2, 2] / 32\n\n fpn_out0 = self.lateral_conv0(x0) # 1024->512/32\n f_out0 = self.upsample(fpn_out0) # 512/16\n f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16\n f_out0 = self.C3_p4(f_out0) # 1024->512/16\n\n fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16\n f_out1 = self.upsample(fpn_out1) # 256/8\n f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8\n pan_out2 = self.C3_p3(f_out1) # 512->256/8\n\n p_out1 = self.bu_conv2(pan_out2) # 256->256/16\n p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16\n pan_out1 = self.C3_n3(p_out1) # 512->512/16\n\n p_out0 = self.bu_conv1(pan_out1) # 512->512/32\n p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32\n pan_out0 = self.C3_n4(p_out0) # 1024->1024/32\n\n outputs = (pan_out2, pan_out1, pan_out0) # 256/8, 512/16, 1024/32\n return outputs"
}
] | import torch
import torch.nn as nn
import contextlib
from .yolo_head import YOLOXHead
from .yolo_pafpn import YOLOPAFPN | 10,305 | #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOX(nn.Module):
"""
YOLOX model module. The module list is defined by create_yolov3_modules function.
The network returns loss values from three YOLO layers during training
and detection results during test.
"""
def __init__(self, backbone=None, head=None, moco=None, freeze=False):
super().__init__()
if backbone is None:
| #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOX(nn.Module):
"""
YOLOX model module. The module list is defined by create_yolov3_modules function.
The network returns loss values from three YOLO layers during training
and detection results during test.
"""
def __init__(self, backbone=None, head=None, moco=None, freeze=False):
super().__init__()
if backbone is None: | backbone = YOLOPAFPN() # backbone, CSPNet with PANet | 1 | 2023-12-18 10:04:40+00:00 | 12k |
liuhuang31/HiFTNet-sr | train.py | [
{
"identifier": "AttrDict",
"path": "env.py",
"snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self"
},
{
"identifier": "build_env",
"path": "env.py",
"snippet": "def build_env(config, config_name, path):\n t_path = os.path.join(path, config_name)\n if config != t_path:\n os.makedirs(path, exist_ok=True)\n shutil.copyfile(config, os.path.join(path, config_name))"
},
{
"identifier": "MelDataset",
"path": "meldataset.py",
"snippet": "class MelDataset(torch.utils.data.Dataset):\n def __init__(self, training_files, segment_size, n_fft, num_mels,\n hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,\n device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):\n self.audio_files = training_files\n random.seed(1234)\n if shuffle:\n random.shuffle(self.audio_files)\n self.segment_size = segment_size\n self.sampling_rate = sampling_rate\n self.split = split\n self.n_fft = n_fft\n self.num_mels = num_mels\n self.hop_size = hop_size\n self.win_size = win_size\n self.fmin = fmin\n self.fmax = fmax\n self.fmax_loss = fmax_loss\n self.cached_wav = None\n self.n_cache_reuse = n_cache_reuse\n self._cache_ref_count = 0\n self.device = device\n self.fine_tuning = fine_tuning\n self.base_mels_path = base_mels_path\n\n def __getitem__(self, index):\n filename = self.audio_files[index]\n if self._cache_ref_count == 0:\n audio, sampling_rate = load_wav(filename, self.sampling_rate)\n # audio = audio / MAX_WAV_VALUE\n if not self.fine_tuning:\n audio = normalize(audio) * 0.95\n self.cached_wav = audio\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n self._cache_ref_count = self.n_cache_reuse\n else:\n audio = self.cached_wav\n self._cache_ref_count -= 1\n\n audio = torch.FloatTensor(audio)\n audio = audio.unsqueeze(0)\n\n if not self.fine_tuning:\n if self.split:\n if audio.size(1) >= self.segment_size:\n max_audio_start = audio.size(1) - self.segment_size\n audio_start = random.randint(0, max_audio_start)\n audio = audio[:, audio_start:audio_start+self.segment_size]\n else:\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,\n center=False, training=True)\n else:\n mel = np.load(\n os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))\n mel = torch.from_numpy(mel)\n\n if len(mel.shape) < 3:\n mel = mel.unsqueeze(0)\n\n if self.split:\n frames_per_seg = math.ceil(self.segment_size / self.hop_size)\n\n if audio.size(1) >= self.segment_size:\n mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)\n mel = mel[:, :, mel_start:mel_start + frames_per_seg]\n audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]\n else:\n mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,\n center=False)\n\n return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())\n\n def __len__(self):\n return len(self.audio_files)"
},
{
"identifier": "mel_spectrogram",
"path": "meldataset.py",
"snippet": "def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False):\n # if torch.min(y) < -1.:\n # print('min value is ', torch.min(y))\n # if torch.max(y) > 1.:\n # print('max value is ', torch.max(y))\n if training:\n with torch.no_grad():\n # 16k to 24k/48k\n if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000):\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 16000)\n y = librosa.resample(y, 16000, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n # 24k to 48k\n elif fmax <= 12000 and sampling_rate == 48000:\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n else:\n pass\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n # complex tensor as default, then use view_as_real for future pytorch compatibility\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)\n spec = torch.view_as_real(spec)\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "get_dataset_filelist",
"path": "meldataset.py",
"snippet": "def get_dataset_filelist(a):\n training_files =[]\n validation_files =[]\n total_files = 0\n input_wave_dirs = a.input_wavs_dir.split(\",\")\n\n for wave_dir in input_wave_dirs:\n num_validation_files = 3\n files_under_path = 0\n allfiles = find_all_wav_path(wave_dir)\n for input_file_name in allfiles:\n if not os.path.splitext(input_file_name)[-1] == '.wav':\n continue\n files_under_path +=1\n full_file_path = input_file_name\n if num_validation_files <=0:\n training_files.append(full_file_path)\n else:\n validation_files.append(full_file_path)\n num_validation_files -=1\n if files_under_path == 0:\n raise Exception(\"no wave file found!\")\n total_files +=files_under_path\n print(f'total files:{total_files}')\n \n return training_files, validation_files"
},
{
"identifier": "Generator",
"path": "models.py",
"snippet": "class Generator(torch.nn.Module):\n def __init__(self, h, F0_model):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.m_source = SourceModuleHnNSF(\n sampling_rate=h.sampling_rate,\n upsample_scale=np.prod(h.upsample_rates) * h.gen_istft_hop_size,\n harmonic_num=8, voiced_threshod=10)\n self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h.upsample_rates) * h.gen_istft_hop_size)\n self.noise_convs = nn.ModuleList()\n self.noise_res = nn.ModuleList()\n \n self.F0_model = F0_model\n \n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),\n k, u, padding=(k-u)//2)))\n\n c_cur = h.upsample_initial_channel // (2 ** (i + 1))\n \n if i + 1 < len(h.upsample_rates): #\n stride_f0 = np.prod(h.upsample_rates[i + 1:])\n self.noise_convs.append(Conv1d(\n h.gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2))\n self.noise_res.append(resblock(h, c_cur, 7, [1,3,5]))\n else:\n self.noise_convs.append(Conv1d(h.gen_istft_n_fft + 2, c_cur, kernel_size=1))\n self.noise_res.append(resblock(h, c_cur, 11, [1,3,5]))\n \n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel//(2**(i+1))\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.post_n_fft = h.gen_istft_n_fft\n self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))\n self.stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft)\n\n def forward(self, x):\n f0, _, _ = self.F0_model(x.unsqueeze(1))\n if len(f0.shape) == 1:\n f0 = f0.unsqueeze(0)\n \n f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t\n\n har_source, _, _ = self.m_source(f0)\n har_source = har_source.transpose(1, 2).squeeze(1)\n har_spec, har_phase = self.stft.transform(har_source)\n har = torch.cat([har_spec, har_phase], dim=1)\n \n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x_source = self.noise_convs[i](har)\n x_source = self.noise_res[i](x_source)\n \n x = self.ups[i](x)\n if i == self.num_upsamples - 1:\n x = self.reflection_pad(x)\n \n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i*self.num_kernels+j](x)\n else:\n xs += self.resblocks[i*self.num_kernels+j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :])\n phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])\n\n return spec, phase\n\n def remove_weight_norm(self):\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiPeriodDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n DiscriminatorP(2),\n DiscriminatorP(3),\n DiscriminatorP(5),\n DiscriminatorP(7),\n DiscriminatorP(11),\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "MultiResSpecDiscriminator",
"path": "models.py",
"snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "feature_loss",
"path": "models.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss*2"
},
{
"identifier": "generator_loss",
"path": "models.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "discriminator_loss",
"path": "models.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "discriminator_TPRLS_loss",
"path": "models.py",
"snippet": "def discriminator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n tau = 0.04\n m_DG = torch.median((dr-dg))\n L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])\n loss += tau - F.relu(tau - L_rel)\n return loss"
},
{
"identifier": "generator_TPRLS_loss",
"path": "models.py",
"snippet": "def generator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n for dg, dr in zip(disc_real_outputs, disc_generated_outputs):\n tau = 0.04\n m_DG = torch.median((dr-dg))\n L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])\n loss += tau - F.relu(tau - L_rel)\n return loss"
},
{
"identifier": "plot_spectrogram",
"path": "utils.py",
"snippet": "def plot_spectrogram(spectrogram):\n fig, ax = plt.subplots(figsize=(10, 2))\n im = ax.imshow(spectrogram, aspect=\"auto\", origin=\"lower\",\n interpolation='none')\n plt.colorbar(im, ax=ax)\n\n fig.canvas.draw()\n plt.close()\n\n return fig"
},
{
"identifier": "scan_checkpoint",
"path": "utils.py",
"snippet": "def scan_checkpoint(cp_dir, prefix):\n pattern = os.path.join(cp_dir, prefix + '????????')\n cp_list = glob.glob(pattern)\n if len(cp_list) == 0:\n return None\n return sorted(cp_list)[-1]"
},
{
"identifier": "load_checkpoint",
"path": "utils.py",
"snippet": "def load_checkpoint(filepath, device):\n assert os.path.isfile(filepath)\n print(\"Loading '{}'\".format(filepath))\n checkpoint_dict = torch.load(filepath, map_location=device)\n print(\"Complete.\")\n return checkpoint_dict"
},
{
"identifier": "save_checkpoint",
"path": "utils.py",
"snippet": "def save_checkpoint(filepath, obj):\n print(\"Saving checkpoint to {}\".format(filepath))\n torch.save(obj, filepath)\n print(\"Complete.\")"
},
{
"identifier": "TorchSTFT",
"path": "stft.py",
"snippet": "class TorchSTFT(torch.nn.Module):\n def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):\n super().__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))\n\n def transform(self, input_data):\n forward_transform = torch.stft(\n input_data,\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),\n return_complex=True)\n\n return torch.abs(forward_transform), torch.angle(forward_transform)\n\n def inverse(self, magnitude, phase):\n inverse_transform = torch.istft(\n magnitude * torch.exp(phase * 1j),\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))\n\n return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction"
},
{
"identifier": "JDCNet",
"path": "Utils/JDC/model.py",
"snippet": "class JDCNet(nn.Module):\n \"\"\"\n Joint Detection and Classification Network model for singing voice melody.\n \"\"\"\n def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):\n super().__init__()\n self.num_class = num_class\n\n # input = (b, 1, 31, 513), b = batch size\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)\n nn.BatchNorm2d(num_features=64),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)\n )\n\n # res blocks\n self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)\n self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)\n self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)\n\n # pool block\n self.pool_block = nn.Sequential(\n nn.BatchNorm2d(num_features=256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)\n nn.Dropout(p=0.2),\n )\n\n # maxpool layers (for auxiliary network inputs)\n # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)\n self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))\n # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)\n self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))\n # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)\n self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))\n\n # in = (b, 640, 31, 2), out = (b, 256, 31, 2)\n self.detector_conv = nn.Sequential(\n nn.Conv2d(640, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Dropout(p=0.2),\n )\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_classifier = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_detector = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b * 31, 512)\n self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)\n\n # input: (b * 31, 512)\n self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier\n\n # initialize weights\n self.apply(self.init_weights)\n\n def get_feature_GAN(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return poolblock_out.transpose(-1, -2)\n \n def get_feature(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return self.pool_block[2](poolblock_out)\n \n def forward(self, x):\n \"\"\"\n Returns:\n classification_prediction, detection_prediction\n sizes: (b, 31, 722), (b, 31, 2)\n \"\"\"\n ###############################\n # forward pass for classifier #\n ###############################\n seq_len = x.shape[-1]\n x = x.float().transpose(-1, -2)\n \n convblock_out = 
self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n \n \n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n GAN_feature = poolblock_out.transpose(-1, -2)\n poolblock_out = self.pool_block[2](poolblock_out)\n \n # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)\n classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))\n classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states\n\n classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)\n classifier_out = self.classifier(classifier_out)\n classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class)\n \n # sizes: (b, 31, 722), (b, 31, 2)\n # classifier output consists of predicted pitch classes per frame\n # detector output consists of: (isvoice, notvoice) estimates per frame\n return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out\n\n @staticmethod\n def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):\n for p in m.parameters():\n if p.data is None:\n continue\n\n if len(p.shape) >= 2:\n nn.init.orthogonal_(p.data)\n else:\n nn.init.normal_(p.data)"
}
] | import warnings
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
from models import Generator, MultiPeriodDiscriminator, MultiResSpecDiscriminator, feature_loss, generator_loss,\
discriminator_loss, discriminator_TPRLS_loss, generator_TPRLS_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
from stft import TorchSTFT
from Utils.JDC.model import JDCNet | 8,279 | stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft).to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
msd.load_state_dict(state_dict_do['msd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank], find_unused_parameters=True).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
training_filelist, validation_filelist = get_dataset_filelist(a)
trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,
fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir)
train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,
sampler=train_sampler,
batch_size=h.batch_size,
pin_memory=True,
drop_last=True)
if rank == 0:
validset = MelDataset(validation_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
base_mels_path=a.input_mels_dir)
validation_loader = DataLoader(validset, num_workers=1, shuffle=False,
sampler=None,
batch_size=1,
pin_memory=True,
drop_last=True)
sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
generator.train()
mpd.train()
msd.train()
for epoch in range(max(0, last_epoch), a.training_epochs):
if rank == 0:
start = time.time()
print("Epoch: {}".format(epoch+1))
if h.num_gpus > 1:
train_sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
if rank == 0:
start_b = time.time()
x, y, _, y_mel = batch
x = torch.autograd.Variable(x.to(device, non_blocking=True))
y = torch.autograd.Variable(y.to(device, non_blocking=True))
y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
y = y.unsqueeze(1)
# y_g_hat = generator(x)
spec, phase = generator(x)
y_g_hat = stft.inverse(spec, phase)
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
optim_d.zero_grad()
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
loss_disc_f += discriminator_TPRLS_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_s += discriminator_TPRLS_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
loss_disc_all.backward()
optim_d.step()
# Generator
optim_g.zero_grad()
# L1 Mel-Spectrogram Loss
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
| warnings.simplefilter(action='ignore', category=FutureWarning)
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(h.F0_path)['model']
F0_model.load_state_dict(params)
generator = Generator(h, F0_model).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiResSpecDiscriminator().to(device)
stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft).to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
msd.load_state_dict(state_dict_do['msd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank], find_unused_parameters=True).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
training_filelist, validation_filelist = get_dataset_filelist(a)
trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,
fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir)
train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,
sampler=train_sampler,
batch_size=h.batch_size,
pin_memory=True,
drop_last=True)
if rank == 0:
validset = MelDataset(validation_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
base_mels_path=a.input_mels_dir)
validation_loader = DataLoader(validset, num_workers=1, shuffle=False,
sampler=None,
batch_size=1,
pin_memory=True,
drop_last=True)
sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
generator.train()
mpd.train()
msd.train()
for epoch in range(max(0, last_epoch), a.training_epochs):
if rank == 0:
start = time.time()
print("Epoch: {}".format(epoch+1))
if h.num_gpus > 1:
train_sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
if rank == 0:
start_b = time.time()
x, y, _, y_mel = batch
x = torch.autograd.Variable(x.to(device, non_blocking=True))
y = torch.autograd.Variable(y.to(device, non_blocking=True))
y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
y = y.unsqueeze(1)
# y_g_hat = generator(x)
spec, phase = generator(x)
y_g_hat = stft.inverse(spec, phase)
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
optim_d.zero_grad()
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
loss_disc_f += discriminator_TPRLS_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_s += discriminator_TPRLS_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
loss_disc_all.backward()
optim_d.step()
# Generator
optim_g.zero_grad()
# L1 Mel-Spectrogram Loss
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat) | loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) | 8 | 2023-12-16 03:53:55+00:00 | 12k |
UnbSky/Hanabi-AI-Assitant | main_connect.py | [
{
"identifier": "AIWindow",
"path": "game_ui.py",
"snippet": "class AIWindow(QMainWindow, Ui_AIUI):\n def __init__(self, url, cookie, model_data=None):\n super().__init__()\n self.setupUi(self)\n #self.setFixedSize(1300, 1200)\n self.setWindowTitle(\"HanabiAIAssitant\")\n\n self.nextstep_btn.setEnabled(False)\n self.prevstep_btn.setEnabled(False)\n self.openhistory_btn.setEnabled(False)\n\n #连接服务器的线程\n self.worker_thread = ClientThread(url, cookie)\n self.worker_thread.game_over_sig.connect(self.game_over)\n self.worker_thread.update_table_ui_sig.connect(self.update_table_info)\n self.worker_thread.handle_action_sig.connect(self.handle_action)\n self.worker_thread.ws_load_sig.connect(self.ws_load)\n self.worker_thread.game_start_sig.connect(self.game_start)\n self.worker_thread.table_joined_sig.connect(self.table_joined)\n\n self.enable_active_btn(False)\n self.game_controller = GameController(model_data)\n self.current_loss_card = None\n self.support_variant = [\n \"No Variant\", \"6 Suits\", \"Black (5 Suits)\", \"Black (6 Suits)\", \"Rainbow (5 Suits)\", \"Rainbow (6 Suits)\",\n \"Brown (5 Suits)\", \"Brown (6 Suits)\", \"Dark Rainbow (6 Suits)\", \"White (5 Suits)\", \"White (6 Suits)\",\n \"Pink (5 Suits)\", \"Pink (6 Suits)\", \"Gray (6 Suits)\"\n ]\n\n self.play_btn.clicked.connect(self.play_clicked)\n self.discard_btn.clicked.connect(self.discard_clicked)\n self.clue_btn.clicked.connect(self.clue_clicked)\n self.leave_btn.clicked.connect(self.leave_table_clicked)\n self.draw_state = False #发牌状态\n self.in_table = False\n\n self.nextstep_btn.clicked.connect(self.next_history_clicked)\n self.prevstep_btn.clicked.connect(self.prev_history_clicked)\n self.openhistory_btn.clicked.connect(self.open_history_clicked)\n\n self.tables = {}\n self.room_label.setWordWrap(3)\n\n #连接服务器\n self.worker_thread.start()\n #0表示在房间里,1表示等待中,2表示游戏中\n self.in_room_status = 0\n\n def send(self, command, data):\n if not isinstance(data, dict):\n data = {}\n self.ws.send(command + \" \" + json.dumps(data))\n print('debug: sent command \"' + command + '\"')\n\n def table_joined(self, data):\n tableID = data['tableID']\n table_info = self.tables[tableID]\n table_id = table_info[\"id\"]\n numPlayers = table_info[\"numPlayers\"]\n variant = table_info[\"variant\"]\n players = table_info[\"players\"]\n name = table_info[\"name\"]\n self.table_id = tableID\n table_str = f\"{name}\\n 在房间中 ID:{table_id} P:{numPlayers} \\n模式:{variant} \\n 玩家:{players}\"\n self.room_label.setText(table_str)\n self.in_room_status = 1\n self.update_table_info(self.tables)\n\n def game_over(self, data):\n table_str = f\"游戏结束了,点击退出离开房间\"\n self.room_label.setText(table_str)\n\n def handle_action(self, data):\n #游戏状态有以下几种\n #draw\n #clue-staus-turn\n #play-draw-status-turn\n #discard-draw-status-turn\n try:\n if data[\"type\"] == \"draw\":\n self.init_draw_round -= 1\n self.game_controller.online_handle_draw(data)\n self.update_all_game_info()\n #游戏开始了,唤醒一下\n if self.init_draw_round == 0:\n self.call_next_round(0)\n\n elif data[\"type\"] == \"play\":\n action_str = self.game_controller.online_handle_play(data)\n self.update_all_game_info()\n self.online_action_list.append(action_str)\n\n elif data[\"type\"] == \"discard\":\n action_str = self.game_controller.online_handle_discard(data)\n self.update_all_game_info()\n self.online_action_list.append(action_str)\n\n elif data[\"type\"] == \"clue\":\n action_str = self.game_controller.online_handle_clue(data)\n self.update_all_game_info()\n for clue_r in self.clue_replace:\n if clue_r in action_str:\n action_str = action_str.replace(clue_r, 
self.clue_replace[clue_r])\n break\n self.online_action_list.append(action_str)\n\n elif data[\"type\"] == \"turn\":\n pid = data[\"currentPlayerIndex\"]\n self.call_next_round(pid)\n self.update_all_game_info()\n\n elif data[\"type\"] == \"status\":\n self.game_controller.online_handle_status(data)\n self.update_game_state()\n else:\n print(data)\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n def ws_load(self, ws):\n self.ws = ws[\"ws\"]\n\n def game_start(self, data):\n print(data)\n tableID = data[\"tableID\"]\n self.clear_UI()\n try:\n #基础游戏设置\n self.clue_replace = {\n \"I0\": '红色(I0)',\n \"I1\": '黄色(I1)',\n \"I2\": '绿色(I2)',\n \"I3\": '蓝色(I3)',\n \"I4\": '紫色(I4)',\n \"I5\": '青色(I5)',\n \"R1\": '数字1(R1)',\n \"R2\": '数字2(R2)',\n \"R3\": '数字3(R3)',\n \"R4\": '数字4(R4)',\n \"R5\": '数字5(R5)',\n }\n colors = [\n (255, 182, 193), # 淡红\n (255, 255, 224), # 淡黄\n (144, 238, 144), # 淡绿\n (173, 216, 230), # 淡蓝\n (221, 160, 221), # 淡紫\n (173, 216, 230) # 淡青\n ]\n self.index_to_color = [f\"background-color: rgb{color}\" for color in colors]\n\n table_info = self.tables[tableID]\n table_id = table_info[\"id\"]\n numPlayers = table_info[\"numPlayers\"]\n variant = table_info[\"variant\"]\n players = table_info[\"players\"]\n name = table_info[\"name\"]\n table_str = f\"{name}\\n 游戏开始 ID:{table_id} P:{numPlayers} \\n模式:{variant} \\n 玩家:{players}\"\n\n self.room_label.setText(table_str)\n self.in_room_status = 2\n self.update_table_info(self.tables)\n self.online_action_list = []\n self.active_pid = 0\n self.random_start = False\n self.server_game = True\n self.table_id = data[\"tableID\"]\n self.game_actions = []\n self.player_count = data[\"options\"][\"numPlayers\"]\n self.spectating = data[\"spectating\"]\n self.playerNames = data[\"playerNames\"]\n if self.player_count <= 3:\n self.card_count = 5\n else:\n self.card_count = 4\n self.varient_name = data[\"options\"][\"variantName\"]\n self.init_draw_round = self.card_count * self.player_count\n\n self.AI_pids = []\n #AI支持的玩法才会有AI预测\n if self.varient_name in self.support_variant:\n if self.spectating:\n for i in range(self.player_count):\n self.AI_pids.append(i)\n else:\n self.AI_pids = [data[\"ourPlayerIndex\"]]\n else:\n print(f\"Unsupported variant: {self.varient_name}\")\n\n game_args = dict(\n players=self.player_count,\n players_card=self.card_count,\n AIplayer=self.AI_pids,\n variant=self.varient_name,\n random_start=False,\n start_card=None,\n allow_drawback=False\n )\n\n gameconf = GameArgs(**game_args)\n self.game_controller.start_game(gameconf)\n special_dict = self.game_controller.special_dict\n if \"Dark Rainbow\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"暗彩虹\"\n self.index_to_color[special_dict.last_special_card] = \"background: qlineargradient(x1:0, y1:0, x2:1, y2:0, stop:0 #FF0000, stop:0.17 #FF7F00, stop:0.33 #FFFF00, stop:0.50 #00FF00, stop:0.67 #0000FF, stop:0.83 #4B0082, stop:1 #9400D3);\"\n elif \"Rainbow\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"彩虹\"\n self.index_to_color[special_dict.last_special_card] = \"background: qlineargradient(x1:0, y1:0, x2:1, y2:0, stop:0 #FFB6C1, stop:0.17 #FFE4C4, stop:0.33 #FFFFE0, stop:0.50 #98FB98, stop:0.67 #ADD8E6, stop:0.83 #E6E6FA, stop:1 #E3E3E3);\"\n elif \"Brown\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"棕色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(205, 133, 63)\"\n elif \"Black\" in 
self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"黑色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(64, 64, 64)\"\n elif \"White\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"白色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(250, 250, 250)\"\n elif \"Pink\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"粉色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(255, 182, 193)\"\n elif \"Gray\" in self.varient_name:\n self.clue_replace[f\"I{special_dict.last_special_card}\"] = \"灰色\"\n self.index_to_color[special_dict.last_special_card] = f\"background-color: rgb(220, 220, 220)\"\n self.setup_button_pannel(self.player_count)\n\n except Exception as e:\n print(\"ERROR:\", e)\n traceback.print_exc()\n return\n\n def open_history_clicked(self):\n options = QFileDialog.Options()\n options |= QFileDialog.ReadOnly\n file_name, _ = QFileDialog.getOpenFileName(self, 'Open File', '', 'JSON Files (*.json);;All Files (*)', options=options)\n if file_name:\n # 打开文件并读取内容\n with open(file_name, 'r') as file:\n try:\n # 解析JSON内容\n self.clear_UI()\n history_data = json.load(file)\n file_name = f\"{file_name}\"\n file_name = file_name.replace(\"ERROR_\",\"\")\n game_args = file_name.split(\"_\")\n print(game_args)\n fake_game_data = {\n \"tableID\": 1,\n \"spectating\": True,\n \"playerNames\": [\"AI0\",\"AI1\",\"AI2\",\"AI3\",\"AI4\",\"AI5\"],\n \"options\":{\n \"numPlayers\": int(game_args[1][0]),\n \"variantName\": game_args[0].split(\"/\")[-1],\n }\n }\n self.game_start(fake_game_data)\n self.current_history_index = 0\n self.game_controller.game_history = history_data\n\n action = self.game_controller.set_current_history(self.current_history_index)\n #print(\"Update History\")\n self.update_all_game_info()\n #print(\"update_all_game_info\")\n self.info_label.setText(f'选择操作: {action[\"str\"]}')\n #print(\"setText\")\n except Exception as e:\n print(f'Error reading history: {e}')\n traceback.print_exc()\n\n def next_history_clicked(self):\n if self.current_history_index < len(self.game_controller.game_history) - 1:\n self.current_history_index += 1\n action = self.game_controller.set_current_history(self.current_history_index)\n self.active_pid = self.game_controller.active_pid\n self.update_all_game_info()\n self.info_label.setText(f'选择操作: {action[\"str\"]}')\n\n def prev_history_clicked(self):\n if self.current_history_index > 0:\n self.current_history_index -= 1\n action = self.game_controller.set_current_history(self.current_history_index)\n self.active_pid = self.game_controller.active_pid\n self.update_all_game_info()\n self.info_label.setText(f'选择操作: {action[\"str\"]}')\n\n\n def ai_action_clicked(self, action_detail):\n act_type = action_detail[\"type\"]\n if act_type == \"play\":\n pid = action_detail[\"pid\"]\n pos = action_detail[\"pos\"]\n if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": ACTION.PLAY,\n \"target\": order,\n },\n )\n elif act_type == \"discard\":\n pid = action_detail[\"pid\"]\n pos = action_detail[\"pos\"]\n if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": 
ACTION.DISCARD,\n \"target\": order,\n },\n )\n elif act_type == \"clue\":\n from_pid = action_detail[\"from\"]\n to_pid = action_detail[\"to\"]\n clue_type = action_detail[\"clue_type\"]\n clue_value = action_detail[\"clue_value\"]\n if clue_type == 0:\n clue_type = ACTION.COLOR_CLUE\n else:\n clue_type = ACTION.RANK_CLUE\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": clue_type,\n \"target\": to_pid,\n \"value\": clue_value,\n },\n )\n\n def leave_table_clicked(self):\n try:\n if self.in_room_status == 2:\n #游戏已经开始,暴力退出\n self.send(\n \"tableUnattend\",\n {\n \"tableID\": self.table_id,\n },\n )\n elif self.in_room_status == 1:\n #游戏还没开始,退出房间\n self.send(\n \"tableLeave\",\n {\n \"tableID\": self.table_id,\n },\n )\n # 清空游戏相关的所有内容\n self.in_room_status = 0\n self.update_table_info(self.tables)\n self.info_label.setText(\"游戏未开始\")\n self.state_label.setText(\"无游戏\")\n self.room_label.setText(\"不在房间里\")\n self.clear_UI()\n except Exception:\n traceback.print_exc()\n\n def clear_UI(self):\n a = QWidget()\n self.cards_area.setWidget(a)\n a = QWidget()\n self.AIpredict_area.setWidget(a)\n a = QWidget()\n self.discard_area.setWidget(a)\n a = QWidget()\n self.history_area.setWidget(a)\n while self.Layout_Clue.count():\n item = self.Layout_Clue.takeAt(0)\n widget = item.widget()\n if widget:\n widget.setParent(None)\n widget.deleteLater()\n while self.Layout_score.count():\n item = self.Layout_score.takeAt(0)\n widget = item.widget()\n if widget:\n widget.setParent(None)\n widget.deleteLater()\n while self.Layout_toP.count():\n item = self.Layout_toP.takeAt(0)\n widget = item.widget()\n if widget:\n widget.setParent(None)\n widget.deleteLater()\n\n def join_table_click(self, table_id):\n password = self.password_edit.toPlainText()\n password = password.strip()\n table = self.tables[table_id]\n if table[\"running\"]:\n #游戏已经开始,进入观战\n print(\"Try tableSpectate\")\n self.send(\n \"tableSpectate\",\n {\n \"shadowingPlayerIndex\": -1,\n \"tableID\": table_id\n },\n )\n else:\n #正常加入\n if table[\"passwordProtected\"]:\n self.send(\n \"tableJoin\",\n {\n \"tableID\": table_id,\n \"password\": password\n },\n )\n else:\n self.send(\n \"tableJoin\",\n {\n \"tableID\": table_id,\n },\n )\n\n def update_table_info(self, tables):\n try:\n self.tables = tables\n lc = QHBoxLayout()\n for table in tables.values():\n table_id = table[\"id\"]\n numPlayers = table[\"numPlayers\"]\n running = table[\"running\"]\n variant = table[\"variant\"]\n passwordProtected = table[\"passwordProtected\"]\n players = table[\"players\"]\n name = table[\"name\"]\n\n table_str = f\"[{name}] \\n\" \\\n f\"ID:{table_id} | 玩家数: {numPlayers} | 游戏中: {running} \\n\" \\\n f\"模式: {variant} \\n\" \\\n f\"密码: {passwordProtected} \\n\" \\\n f\"[{','.join(players)}]\"\n\n cbutton = QPushButton(table_str)\n cbutton.setFixedSize(300, 140)\n cbutton.setStyleSheet(f\"text-align: center; font: 15px;\")\n cbutton.clicked.connect(lambda _, xx=table_id: self.join_table_click(xx))\n if self.in_room_status != 0:\n cbutton.setEnabled(False)\n else:\n cbutton.setEnabled(True)\n\n lc.addWidget(cbutton)\n\n lc.addStretch(1)\n a = QWidget()\n a.setLayout(lc)\n self.table_area.setWidget(a)\n except Exception as e:\n traceback.print_exc()\n\n def enable_active_btn(self, enable):\n self.discard_btn.setEnabled(enable)\n self.play_btn.setEnabled(enable)\n self.clue_btn.setEnabled(enable)\n\n def play_clicked(self):\n if self.current_pcard_pid_pos is None:\n print(\"ERROR: 没有选中的玩家牌\")\n return\n [pid, pos] = self.current_pcard_pid_pos\n 
if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": ACTION.PLAY,\n \"target\": order,\n },\n )\n\n def discard_clicked(self):\n if self.current_pcard_pid_pos is None:\n print(\"ERROR: 没有选中的玩家牌\")\n return\n [pid, pos] = self.current_pcard_pid_pos\n if pid != self.active_pid:\n print(\"ERROR: 不能操作非当前回合玩家的牌\")\n return\n order = self.game_controller.players[pid].online_order[pos]\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": ACTION.DISCARD,\n \"target\": order,\n },\n )\n\n def clue_clicked(self):\n try:\n if self.clue_choose is None:\n print(\"ERROR: 还没有选择提示\")\n return\n if self.splayer_choose is None:\n print(\"ERROR: 还没有选择提示的玩家\")\n return\n rpid = self.splayer_choose - self.active_pid\n if rpid < 0:\n rpid += self.player_count\n if self.clue_choose[0] == \"I\":\n clue_type = ACTION.COLOR_CLUE\n else:\n clue_type = ACTION.RANK_CLUE\n self.send(\n \"action\",\n {\n \"tableID\": self.table_id,\n \"type\": clue_type,\n \"target\": self.splayer_choose,\n \"value\": int(self.clue_choose[1]),\n },\n )\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n def call_next_round(self, active_pid):\n try:\n lb = QVBoxLayout()\n self.active_pid = active_pid\n self.info_label.setText(f\"轮到P{active_pid}【{self.playerNames[active_pid]}】操作\")\n if len(self.AI_pids) > 0:\n if active_pid == self.AI_pids[0] and (not self.spectating):\n self.info_label.setText(f\"轮到P{active_pid}【{self.playerNames[active_pid]}】【你自己!】操作\")\n\n if active_pid in self.AI_pids:\n #向AI查询预测结果\n if not self.spectating:\n self.enable_active_btn(True)\n action_predict = self.game_controller.call_AI_predict(active_pid, 10)\n for action in action_predict:\n action_token = action[\"token\"]\n action_probs = action[\"probs\"]\n action_detail = self.game_controller.get_action(action_token, active_pid)\n action_desc = action_detail[\"str\"]\n for clue_r in self.clue_replace:\n if clue_r in action_desc:\n #print(clue_r)\n action_desc = action_desc.replace(clue_r, self.clue_replace[clue_r])\n break\n action_str = f'{action_desc} \\n 概率:{action_probs*100:.2f}%'\n actionbutton = ValueButton(action_str, action_detail)\n actionbutton.setStyleSheet(f\"font: bold 18px;\")\n actionbutton.setFixedSize(330, 50)\n actionbutton.clicked.connect(lambda _, i=copy.deepcopy(action_detail): self.ai_action_clicked(i))\n if self.spectating:\n actionbutton.setEnabled(False)\n else:\n actionbutton.setEnabled(True)\n lb.addWidget(actionbutton)\n else:\n self.enable_active_btn(False)\n\n lb.addStretch(1)\n a = QWidget()\n a.setLayout(lb)\n self.AIpredict_area.setWidget(a)\n print(\"Call_next_round Finish\")\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n def update_game_state(self):\n state_txt = f\"得分:{self.game_controller.score}/{sum(self.game_controller.Hrank)}\\n线索:{self.game_controller.clue}\" \\\n f\"\\n 错误:{self.game_controller.mistake} \\n 剩余牌:{self.game_controller.get_current_card()}\"\n self.state_label.setText(state_txt)\n\n def update_all_game_info(self):\n\n self.player_card_btns = []\n self.current_pcard_pid_pos = None\n\n # 更新所有的分数信息\n for i in range(len(self.game_controller.Irank)):\n score = self.game_controller.Irank[i]\n self.scoreLabels[i].setText(f\"{score}\")\n\n #更新历史消息\n lb = QVBoxLayout()\n for history_str in self.online_action_list:\n button = QPushButton(f\"{history_str}\", self)\n button.setFixedSize(200, 40)\n lb.addWidget(button)\n 
lb.addStretch(1)\n a = QWidget()\n a.setLayout(lb)\n\n self.history_area.setWidget(a)\n\n v_scrollbar = self.history_area.findChild(QScrollBar)\n if v_scrollbar:\n v_scrollbar.setValue(v_scrollbar.maximum())\n\n #更新弃牌堆信息\n lg = QGridLayout()\n ind = 0\n for card in self.game_controller.discard_cards:\n row = ind // 6\n column = ind % 6\n ind += 1\n card_index, card_rank = self.game_controller.parse_card(card)\n button = QPushButton(f\"{card_rank}\", self)\n button.setFixedSize(40, 40)\n button.setStyleSheet(f\"{self.index_to_color[card_index]}; font: bold 24px;\")\n lg.addWidget(button, row, column)\n #lg.addStretch(1)\n a = QWidget()\n a.setLayout(lg)\n self.discard_area.setWidget(a)\n\n # 更新UI中玩家的卡\n lb = QVBoxLayout()\n pid = 0\n for player in self.game_controller.players:\n p_head = QLabel(f\"Player: {pid} [{self.playerNames[pid]}]\")\n if pid == self.active_pid:\n p_head.setStyleSheet(f'font: bold 35px;')\n else:\n p_head.setStyleSheet(f'font-size: 30px;')\n lb.addWidget(p_head)\n cl = len(player.cards)\n lc = QHBoxLayout()\n for i in range(cl - 1, -1, -1):\n card = player.cards[i]\n kcard = player.known_cards[i]\n card_index, card_rank = self.game_controller.parse_card(card)\n kcard_index, kcard_rank = self.game_controller.parse_card(kcard)\n if card_index == 9:\n card_color = \"background-color: rgb(200, 200, 200)\"\n else:\n card_color = self.index_to_color[card_index]\n if kcard_index == 9:\n kcard_color = \"background-color: rgb(200, 200, 200)\"\n else:\n kcard_color = self.index_to_color[kcard_index]\n if card_rank == 9:\n card_rank = \"?\"\n if kcard_rank == 9:\n kcard_rank = \"?\"\n\n pcbutton = CardButton(f\"{card_rank}\", f\"{kcard_rank}\", card_color, kcard_color, self.pcard_clicked, [pid, i])\n self.player_card_btns.append(pcbutton)\n #self.current_tbutton_list.append(pcbutton)\n lc.addWidget(pcbutton)\n\n pid += 1\n lc.addStretch(0)\n lb.addLayout(lc)\n\n lb.addStretch(1)\n a = QWidget()\n a.setLayout(lb)\n self.cards_area.setWidget(a)\n\n def playerchose_clicked(self, pid):\n self.splayer_choose = pid\n for cbtn in self.splayer_btns:\n if cbtn.get_value() == pid:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"24px;\", \"36px;\"))\n else:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"36px;\", \"24px;\"))\n\n def cluechose_clicked(self, clue):\n self.clue_choose = clue\n for cbtn in self.clue_btns:\n if cbtn.get_value() == clue:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"24px;\", \"36px;\"))\n else:\n cbtn.setStyleSheet(cbtn.styleSheet().replace(\"36px;\", \"24px;\"))\n\n def pcard_clicked(self, pid_pos):\n self.current_pcard_pid_pos = pid_pos\n #print(self.current_pcard_pid_pos)\n for cbtn in self.player_card_btns:\n #print(cbtn.get_value())\n if cbtn.value[0] == pid_pos[0] and cbtn.value[1] == pid_pos[1]:\n cbtn.highlight(True)\n else:\n cbtn.highlight(False)\n\n def setup_button_pannel(self, players):\n #选择玩家区域的所有玩家\n self.splayer_btns = []\n self.splayer_choose = None\n for i in range(0, players):\n button = ValueButton(f\"P{i}\", i)\n button.setFixedSize(60, 60)\n\n button.setStyleSheet(f\"background-color: rgb(220, 220, 220); font: bold 24px;\")\n self.splayer_btns.append(button)\n\n button.clicked.connect(lambda _, i=i: self.playerchose_clicked(i))\n self.Layout_toP.addWidget(button, i)\n\n #选择提示线索区域的所有线索\n self.clue_btns = []\n self.clue_choose = None\n special_dict = self.game_controller.special_dict\n colors = special_dict.last_special_card + 1\n for i in range(colors):\n #提示颜色\n if i == special_dict.last_special_card and (special_dict.all_color_rule 
or special_dict.no_color_rule):\n #彩虹无法被提示,Null也无法被提示\n continue\n clue = f\"I{i}\"\n button = ValueButton(clue, clue)\n button.setFixedSize(50, 50)\n\n button.setStyleSheet(f\"{self.index_to_color[i]}; font: bold 24px;\")\n self.clue_btns.append(button)\n\n button.clicked.connect(lambda _, clue=clue: self.cluechose_clicked(clue))\n self.Layout_Clue.addWidget(button, 0, i)\n for i in range(1, 6):\n #提示数字\n clue = f\"R{i}\"\n button = ValueButton(clue, clue)\n button.setFixedSize(50, 50)\n\n button.setStyleSheet(f\"background-color: rgb(200, 200, 200); font: bold 24px;\")\n self.clue_btns.append(button)\n\n button.clicked.connect(lambda _, clue=clue: self.cluechose_clicked(clue))\n self.Layout_Clue.addWidget(button, 1, i - 1)\n\n #得分区域的显示(对应五种颜色)\n self.scoreLabels = []\n for i in range(colors):\n #提示颜色\n clue = f\"I{i}\"\n sl = QLabel(\"0\")\n sl.setFixedSize(70, 70)\n sl.setStyleSheet(\"QLabel {\"\n f\"{self.index_to_color[i]};\"\n \"border: 2px solid black;\"\n \"border-radius: 5px;\"\n \"font: bold 40px;\"\n \"text-align: center;\"\n \"}\")\n self.scoreLabels.append(sl)\n self.Layout_score.addWidget(sl, i)"
},
{
"identifier": "load_model",
"path": "play_util.py",
"snippet": "def load_model(model_name=None):\n #device = 'cuda' if torch.cuda.is_available() else 'cpu' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.\n device = 'cpu'\n\n acition_dict_toid = {}\n if model_name is None:\n dict_path = 'dict.json'\n else:\n dict_path = f'{model_name}/dict.json'\n with open(dict_path, 'r', encoding='utf-8') as file:\n acition_dict = json.load(file)\n acition_dict = [\"<pad>\"] + acition_dict\n ind = 0\n for action in acition_dict:\n acition_dict_toid[action] = ind\n #print(action, ind)\n ind += 1\n n_vacabs = len(acition_dict)\n output_acition_dict_toid = {}\n if model_name is None:\n output_dict_path = 'output_dict.json'\n else:\n output_dict_path = f'{model_name}/output_dict.json'\n with open(output_dict_path, 'r', encoding='utf-8') as file:\n output_acition_dict = json.load(file)\n output_acition_dict = [\"<pad>\"] + output_acition_dict\n ind = 0\n for action in output_acition_dict:\n output_acition_dict_toid[action] = ind\n #print(action, ind)\n ind += 1\n n_vacabs_out = len(output_acition_dict)\n\n if model_name is None:\n max_seq_len = 900\n dim = 384\n n_layers = 8\n n_heads = 8\n multiple_of = 32\n dropout = 0.0\n model_args = dict(\n dim=dim,\n n_layers=n_layers,\n n_heads=n_heads,\n n_kv_heads=n_heads,\n vocab_size=n_vacabs,\n output_vocab_size=n_vacabs_out,\n multiple_of=multiple_of,\n max_seq_len=max_seq_len,\n dropout=dropout,\n ) # s\n else:\n with open(f'{model_name}/config.json', 'r') as json_file:\n model_args = json.load(json_file)\n\n seed = 1337\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul\n torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn\n\n # init from a model saved in a specific directory\n if model_name is None:\n ckpt_path = 'best_valid.pth'\n else:\n ckpt_path = f'{model_name}/model.pth'\n state_dict = torch.load(ckpt_path, map_location=device)\n gptconf = ModelArgs(**model_args)\n model = Transformer(gptconf)\n unwanted_prefix = '_orig_mod.'\n for k, v in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n model.to(device)\n return model, acition_dict, acition_dict_toid, output_acition_dict, output_acition_dict_toid, device"
}
] | import sys
import json
import requests
from PyQt5 import QtWidgets, QtCore
from game_ui import AIWindow
from play_util import load_model | 8,746 | def printf(*args):
print(*args, flush=True)
# Imports (3rd-party)
# Imports (local application)
# Authenticate, login to the WebSocket server, and run forever.
def login_to_hanab(username, password):
if username == "":
printf('error: "HANABI_USERNAME" is blank in the ".env" file')
sys.exit(1)
if password == "":
printf('error: "HANABI_PASSWORD" is blank in the ".env" file')
sys.exit(1)
# The official site uses HTTPS.
protocol = "https"
ws_protocol = "wss"
host = "hanab.live"
path = "/login"
ws_path = "/ws"
url = protocol + "://" + host + path
ws_url = ws_protocol + "://" + host + ws_path
printf('Authenticating to "' + url + '" with a username of "' + username + '".')
resp = requests.post(
url,
{
"username": username,
"password": password,
# This is normally supposed to be the version of the JavaScript
# client, but the server will also accept "bot" as a valid version.
"version": "bot",
},
)
# Handle failed authentication and other errors.
if resp.status_code != 200:
printf("Authentication failed:")
printf(resp.text)
sys.exit(1)
# Scrape the cookie from the response.
cookie = ""
for header in resp.headers.items():
if header[0] == "Set-Cookie":
cookie = header[1]
break
if cookie == "":
printf("Failed to parse the cookie from the authentication response headers:")
printf(resp.headers)
sys.exit(1)
return ws_url, cookie
def main():
with open(f'user_config.json', 'r') as json_file:
user_args = json.load(json_file)
username = user_args["username"]
password = user_args["password"]
model_name = user_args["model"]
printf("Load Model")
| def printf(*args):
print(*args, flush=True)
# Imports (3rd-party)
# Imports (local application)
# Authenticate, login to the WebSocket server, and run forever.
def login_to_hanab(username, password):
if username == "":
printf('error: "HANABI_USERNAME" is blank in the ".env" file')
sys.exit(1)
if password == "":
printf('error: "HANABI_PASSWORD" is blank in the ".env" file')
sys.exit(1)
# The official site uses HTTPS.
protocol = "https"
ws_protocol = "wss"
host = "hanab.live"
path = "/login"
ws_path = "/ws"
url = protocol + "://" + host + path
ws_url = ws_protocol + "://" + host + ws_path
printf('Authenticating to "' + url + '" with a username of "' + username + '".')
resp = requests.post(
url,
{
"username": username,
"password": password,
# This is normally supposed to be the version of the JavaScript
# client, but the server will also accept "bot" as a valid version.
"version": "bot",
},
)
# Handle failed authentication and other errors.
if resp.status_code != 200:
printf("Authentication failed:")
printf(resp.text)
sys.exit(1)
# Scrape the cookie from the response.
cookie = ""
for header in resp.headers.items():
if header[0] == "Set-Cookie":
cookie = header[1]
break
if cookie == "":
printf("Failed to parse the cookie from the authentication response headers:")
printf(resp.headers)
sys.exit(1)
return ws_url, cookie
def main():
with open(f'user_config.json', 'r') as json_file:
user_args = json.load(json_file)
username = user_args["username"]
password = user_args["password"]
model_name = user_args["model"]
printf("Load Model") | model, action_dict_toact, action_dict_toid, output_action_dict_toact, output_action_dict_toid, device = load_model(model_name) | 1 | 2023-12-17 03:57:47+00:00 | 12k |
m-abr/FCPCodebase | math_ops/Matrix_4x4.py | [
{
"identifier": "Math_Ops",
"path": "math_ops/Math_Ops.py",
"snippet": "class Math_Ops():\n '''\n This class provides general mathematical operations that are not directly available through numpy \n '''\n \n @staticmethod\n def deg_sph2cart(spherical_vec):\n ''' Converts SimSpark's spherical coordinates in degrees to cartesian coordinates '''\n r = spherical_vec[0]\n h = spherical_vec[1] * pi / 180\n v = spherical_vec[2] * pi / 180\n return np.array([r * cos(v) * cos(h), r * cos(v) * sin(h), r * sin(v)])\n\n @staticmethod\n def deg_sin(deg_angle):\n ''' Returns sin of degrees '''\n return sin(deg_angle * pi / 180)\n\n @staticmethod\n def deg_cos(deg_angle):\n ''' Returns cos of degrees '''\n return cos(deg_angle * pi / 180)\n\n @staticmethod\n def to_3d(vec_2d, value=0) -> np.ndarray:\n ''' Returns new 3d vector from 2d vector '''\n return np.append(vec_2d,value)\n\n @staticmethod\n def to_2d_as_3d(vec_3d) -> np.ndarray:\n ''' Returns new 3d vector where the 3rd dimension is zero '''\n vec_2d_as_3d = np.copy(vec_3d)\n vec_2d_as_3d[2] = 0\n return vec_2d_as_3d\n\n @staticmethod\n def normalize_vec(vec) -> np.ndarray:\n ''' Divides vector by its length '''\n size = np.linalg.norm(vec)\n if size == 0: return vec\n return vec / size\n\n @staticmethod\n def get_active_directory(dir:str) -> str:\n global GLOBAL_DIR\n return GLOBAL_DIR + dir\n\n @staticmethod\n def acos(val):\n ''' arccosine function that limits input '''\n return acos( np.clip(val,-1,1) )\n \n @staticmethod\n def asin(val):\n ''' arcsine function that limits input '''\n return asin( np.clip(val,-1,1) )\n\n @staticmethod\n def normalize_deg(val):\n ''' normalize val in range [-180,180[ '''\n return (val + 180.0) % 360 - 180\n\n @staticmethod\n def normalize_rad(val):\n ''' normalize val in range [-pi,pi[ '''\n return (val + pi) % (2*pi) - pi\n\n @staticmethod\n def deg_to_rad(val):\n ''' convert degrees to radians '''\n return val * 0.01745329251994330\n\n @staticmethod\n def rad_to_deg(val):\n ''' convert radians to degrees '''\n return val * 57.29577951308232\n\n @staticmethod\n def vector_angle(vector, is_rad=False):\n ''' angle (degrees or radians) of 2D vector '''\n if is_rad:\n return atan2(vector[1], vector[0])\n else:\n return atan2(vector[1], vector[0]) * 180 / pi\n\n @staticmethod\n def vectors_angle(vec1, vec2, is_rad=False):\n ''' get angle between vectors (degrees or radians) '''\n ang_rad = acos(np.dot(Math_Ops.normalize_vec(vec1),Math_Ops.normalize_vec(vec2)))\n return ang_rad if is_rad else ang_rad * 180 / pi\n\n @staticmethod\n def vector_from_angle(angle, is_rad=False):\n ''' unit vector with direction given by `angle` '''\n if is_rad:\n return np.array([cos(angle), sin(angle)], float)\n else:\n return np.array([Math_Ops.deg_cos(angle), Math_Ops.deg_sin(angle)], float)\n\n @staticmethod\n def target_abs_angle(pos2d, target, is_rad=False):\n ''' angle (degrees or radians) of vector (target-pos2d) '''\n if is_rad:\n return atan2(target[1]-pos2d[1], target[0]-pos2d[0])\n else:\n return atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi\n\n @staticmethod\n def target_rel_angle(pos2d, ori, target, is_rad=False):\n ''' relative angle (degrees or radians) of target if we're located at 'pos2d' with orientation 'ori' (degrees or radians) '''\n if is_rad:\n return Math_Ops.normalize_rad( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) - ori )\n else:\n return Math_Ops.normalize_deg( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi - ori )\n\n @staticmethod\n def rotate_2d_vec(vec, angle, is_rad=False):\n ''' rotate 2D vector anticlockwise around the origin by `angle` 
'''\n cos_ang = cos(angle) if is_rad else cos(angle * pi / 180)\n sin_ang = sin(angle) if is_rad else sin(angle * pi / 180)\n return np.array([cos_ang*vec[0]-sin_ang*vec[1], sin_ang*vec[0]+cos_ang*vec[1]])\n\n @staticmethod\n def distance_point_to_line(p:np.ndarray, a:np.ndarray, b:np.ndarray):\n ''' \n Distance between point p and 2d line 'ab' (and side where p is)\n\n Parameters\n ----------\n a : ndarray\n 2D point that defines line\n b : ndarray\n 2D point that defines line\n p : ndarray\n 2D point\n\n Returns\n -------\n distance : float\n distance between line and point\n side : str\n if we are at a, looking at b, p may be at our \"left\" or \"right\"\n '''\n line_len = np.linalg.norm(b-a)\n\n if line_len == 0: # assumes vertical line\n dist = sdist = np.linalg.norm(p-a)\n else:\n sdist = np.cross(b-a,p-a)/line_len\n dist = abs(sdist)\n\n return dist, \"left\" if sdist>0 else \"right\"\n\n @staticmethod\n def distance_point_to_segment(p:np.ndarray, a:np.ndarray, b:np.ndarray):\n ''' Distance from point p to 2d line segment 'ab' '''\n \n ap = p-a\n ab = b-a\n\n ad = Math_Ops.vector_projection(ap,ab)\n\n # Is d in ab? We can find k in (ad = k * ab) without computing any norm\n # we use the largest dimension of ab to avoid division by 0\n k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]\n\n if k <= 0: return np.linalg.norm(ap)\n elif k >= 1: return np.linalg.norm(p-b)\n else: return np.linalg.norm(p-(ad + a)) # p-d\n\n @staticmethod\n def distance_point_to_ray(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):\n ''' Distance from point p to 2d ray '''\n \n rp = p-ray_start\n rd = Math_Ops.vector_projection(rp,ray_direction)\n\n # Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm\n # we use the largest dimension of ray_direction to avoid division by 0\n k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]\n\n if k <= 0: return np.linalg.norm(rp)\n else: return np.linalg.norm(p-(rd + ray_start)) # p-d\n\n @staticmethod\n def closest_point_on_ray_to_point(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):\n ''' Point on ray closest to point p '''\n \n rp = p-ray_start\n rd = Math_Ops.vector_projection(rp,ray_direction)\n\n # Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm\n # we use the largest dimension of ray_direction to avoid division by 0\n k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]\n\n if k <= 0: return ray_start\n else: return rd + ray_start\n\n @staticmethod\n def does_circle_intersect_segment(p:np.ndarray, r, a:np.ndarray, b:np.ndarray):\n ''' Returns true if circle (center p, radius r) intersect 2d line segment '''\n\n ap = p-a\n ab = b-a\n\n ad = Math_Ops.vector_projection(ap,ab)\n\n # Is d in ab? 
We can find k in (ad = k * ab) without computing any norm\n # we use the largest dimension of ab to avoid division by 0\n k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]\n\n if k <= 0: return np.dot(ap,ap) <= r*r\n elif k >= 1: return np.dot(p-b,p-b) <= r*r\n \n dp = p-(ad + a)\n return np.dot(dp,dp) <= r*r\n\n @staticmethod\n def vector_projection(a:np.ndarray, b:np.ndarray):\n ''' Vector projection of a onto b '''\n b_dot = np.dot(b,b)\n return b * np.dot(a,b) / b_dot if b_dot != 0 else b\n\n @staticmethod\n def do_noncollinear_segments_intersect(a,b,c,d):\n ''' \n Check if 2d line segment 'ab' intersects with noncollinear 2d line segment 'cd' \n Explanation: https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/ \n '''\n\n ccw = lambda a,b,c: (c[1]-a[1]) * (b[0]-a[0]) > (b[1]-a[1]) * (c[0]-a[0])\n return ccw(a,c,d) != ccw(b,c,d) and ccw(a,b,c) != ccw(a,b,d)\n\n @staticmethod\n def intersection_segment_opp_goal(a:np.ndarray, b:np.ndarray):\n ''' Computes the intersection point of 2d segment 'ab' and the opponents' goal (front line) '''\n vec_x = b[0]-a[0]\n\n # Collinear intersections are not accepted\n if vec_x == 0: return None\n \n k = (15.01-a[0])/vec_x\n\n # No collision\n if k < 0 or k > 1: return None\n\n intersection_pt = a + (b-a) * k\n\n if -1.01 <= intersection_pt[1] <= 1.01:\n return intersection_pt\n else:\n return None\n\n @staticmethod\n def intersection_circle_opp_goal(p:np.ndarray, r):\n ''' \n Computes the intersection segment of circle (center p, radius r) and the opponents' goal (front line)\n Only the y coordinates are returned since the x coordinates are always equal to 15\n '''\n\n x_dev = abs(15-p[0])\n\n if x_dev > r:\n return None # no intersection with x=15\n\n y_dev = sqrt(r*r - x_dev*x_dev)\n\n p1 = max(p[1] - y_dev, -1.01)\n p2 = min(p[1] + y_dev, 1.01)\n\n if p1 == p2:\n return p1 # return the y coordinate of a single intersection point\n elif p2 < p1:\n return None # no intersection\n else:\n return p1, p2 # return the y coordinates of the intersection segment\n\n\n @staticmethod\n def distance_point_to_opp_goal(p:np.ndarray):\n ''' Distance between point 'p' and the opponents' goal (front line) '''\n\n if p[1] < -1.01:\n return np.linalg.norm( p-(15,-1.01) )\n elif p[1] > 1.01:\n return np.linalg.norm( p-(15, 1.01) )\n else:\n return abs(15-p[0])\n\n\n @staticmethod\n def circle_line_segment_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):\n \"\"\" Find the points at which a circle intersects a line-segment. This can happen at 0, 1, or 2 points.\n\n :param circle_center: The (x, y) location of the circle center\n :param circle_radius: The radius of the circle\n :param pt1: The (x, y) location of the first point of the segment\n :param pt2: The (x, y) location of the second point of the segment\n :param full_line: True to find intersections along full line - not just in the segment. 
False will just return intersections within the segment.\n :param tangent_tol: Numerical tolerance at which we decide the intersections are close enough to consider it a tangent\n :return Sequence[Tuple[float, float]]: A list of length 0, 1, or 2, where each element is a point at which the circle intercepts a line segment.\n\n Note: We follow: http://mathworld.wolfram.com/Circle-LineIntersection.html\n \"\"\"\n\n (p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center\n (x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)\n dx, dy = (x2 - x1), (y2 - y1)\n dr = (dx ** 2 + dy ** 2)**.5\n big_d = x1 * y2 - x2 * y1\n discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2\n\n if discriminant < 0: # No intersection between circle and line\n return []\n else: # There may be 0, 1, or 2 intersections with the segment\n intersections = [\n (cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant**.5) / dr ** 2,\n cy + (-big_d * dx + sign * abs(dy) * discriminant**.5) / dr ** 2)\n for sign in ((1, -1) if dy < 0 else (-1, 1))] # This makes sure the order along the segment is correct\n if not full_line: # If only considering the segment, filter out intersections that do not fall within the segment\n fraction_along_segment = [\n (xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy for xi, yi in intersections]\n intersections = [pt for pt, frac in zip(\n intersections, fraction_along_segment) if 0 <= frac <= 1]\n # If line is tangent to circle, return just one point (as both intersections have same location)\n if len(intersections) == 2 and abs(discriminant) <= tangent_tol:\n return [intersections[0]]\n else:\n return intersections\n\n\n\n\n # adapted from https://stackoverflow.com/questions/3252194/numpy-and-line-intersections\n @staticmethod\n def get_line_intersection(a1, a2, b1, b2):\n \"\"\" \n Returns the point of intersection of the lines passing through a2,a1 and b2,b1.\n a1: [x, y] a point on the first line\n a2: [x, y] another point on the first line\n b1: [x, y] a point on the second line\n b2: [x, y] another point on the second line\n \"\"\"\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return np.array([float('inf'), float('inf')])\n return np.array([x/z, y/z],float)"
},
{
"identifier": "Matrix_3x3",
"path": "math_ops/Matrix_3x3.py",
"snippet": "class Matrix_3x3():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_3x3( ) # create identity matrix\n b = Matrix_3x3( [[1,1,1],[2,2,2],[3,3,3]] ) # manually initialize matrix\n c = Matrix_3x3( [1,1,1,2,2,2,3,3,3] ) # manually initialize matrix\n d = Matrix_3x3( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(3)\n elif type(matrix) == Matrix_3x3: \n self.m = np.copy(matrix.m)\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (3,3) #reshape if needed, throw error if impossible\n\n\n self.rotation_shortcuts={(1,0,0):self.rotate_x_rad, (-1, 0, 0):self._rotate_x_neg_rad,\n (0,1,0):self.rotate_y_rad, ( 0,-1, 0):self._rotate_y_neg_rad,\n (0,0,1):self.rotate_z_rad, ( 0, 0,-1):self._rotate_z_neg_rad}\n\n @classmethod\n def from_rotation_deg(cls, euler_vec):\n '''\n Create rotation matrix from Euler angles, in degrees.\n Rotation order: RotZ*RotY*RotX\n\n Parameters\n ----------\n euler_vec : array_like, length 3\n vector with Euler angles (x,y,z) aka (roll, pitch, yaw)\n\n Example\n ----------\n Matrix_3x3.from_rotation_deg((roll,pitch,yaw)) # Creates: RotZ(yaw)*RotY(pitch)*RotX(roll)\n '''\n mat = cls().rotate_z_deg(euler_vec[2], True).rotate_y_deg(euler_vec[1], True).rotate_x_deg(euler_vec[0], True)\n return mat\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n\n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(self.m[2,2]) * 180 / pi)\n\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: return\n\n shortcut = self.rotation_shortcuts.get(tuple(a for a in rotation_vec))\n if shortcut:\n return shortcut(rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n 
yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys],\n [xyc1 + zs, yyc1 + c, yzc1 - xs],\n [xzc1 - ys, yzc1 + xs, zzc1 + c]])\n\n return self.multiply(mat, in_place)\n\n\n def _rotate_x_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_x_rad(-rotation_rad, in_place)\n\n def _rotate_y_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_y_rad(-rotation_rad, in_place)\n\n def _rotate_z_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_z_rad(-rotation_rad, in_place)\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0],\n [0, c,-s],\n [0, s, c]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s],\n [ 0, 1, 0],\n [-s, 0, c]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0],\n [ s, c, 0],\n [ 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n 
Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current rotation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_3x3(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False, reverse_order=False):\n '''\n Multiplies the current rotation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_3x3 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n - True: the internal matrix is changed in-place\n - False: a new matrix is returned and the current one is not changed (default) \n reverse_order: bool, optional\n - False: self * mat\n - True: mat * self\n \n Returns\n -------\n result : Matrix_3x3 | array_like\n Matrix_3x3 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n # get array from matrix object or convert to numpy array (if needed) \n mat = mat.m if type(mat) == Matrix_3x3 else np.asarray(mat)\n\n a,b = (mat, self.m) if reverse_order else (self.m, mat)\n\n if mat.ndim == 1: \n return np.matmul(a, b) # multiplication by 3D vector\n elif in_place:\n np.matmul(a, b, self.m) # multiplication by matrix, in place\n return self\n else: # multiplication by matrix, return new Matrix_3x3\n return Matrix_3x3(np.matmul(a, b))"
}
] | from math import asin, atan2, pi, sqrt
from math_ops.Math_Ops import Math_Ops as M
from math_ops.Matrix_3x3 import Matrix_3x3
import numpy as np | 7,569 |
class Matrix_4x4():
def __init__(self, matrix = None) -> None:
'''
Constructor examples:
a = Matrix_4x4( ) # create identity matrix
b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix
c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix
d = Matrix_4x4( b ) # copy constructor
'''
if matrix is None:
self.m = np.identity(4)
elif type(matrix) == Matrix_4x4:
self.m = np.copy(matrix.m)
|
class Matrix_4x4():
def __init__(self, matrix = None) -> None:
'''
Constructor examples:
a = Matrix_4x4( ) # create identity matrix
b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix
c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix
d = Matrix_4x4( b ) # copy constructor
'''
if matrix is None:
self.m = np.identity(4)
elif type(matrix) == Matrix_4x4:
self.m = np.copy(matrix.m) | elif type(matrix) == Matrix_3x3: | 1 | 2023-12-16 23:40:23+00:00 | 12k |
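The row above ends with next_line `elif type(matrix) == Matrix_3x3:`, i.e. the Matrix_4x4 constructor is also expected to accept a Matrix_3x3. A minimal sketch of that type dispatch, assuming only that Matrix_3x3 exposes an `.m` ndarray as in the snippet earlier in the row; the stub class and the upper-left-block embedding are illustrative assumptions, not the repository's actual continuation:

import numpy as np

class Matrix_3x3:
    # stand-in stub: only the `.m` attribute matters for this sketch
    def __init__(self, matrix=None):
        self.m = np.identity(3) if matrix is None else np.asarray(matrix, dtype=float).reshape(3, 3)

class Matrix_4x4:
    def __init__(self, matrix=None):
        if matrix is None:
            self.m = np.identity(4)                 # identity matrix
        elif isinstance(matrix, Matrix_4x4):
            self.m = np.copy(matrix.m)              # copy constructor
        elif isinstance(matrix, Matrix_3x3):
            self.m = np.identity(4)                 # assumed behavior: embed the 3x3 block
            self.m[0:3, 0:3] = matrix.m             # into the rotation part of a 4x4 transform
        else:
            self.m = np.asarray(matrix, dtype=float)
            self.m.shape = (4, 4)                   # reshape if needed, error if impossible

print(Matrix_4x4(Matrix_3x3()).m)                   # prints a 4x4 identity matrix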
Sam-Izdat/tinycio | src/tinycio/util/colorutil.py | [
{
"identifier": "Float2",
"path": "src/tinycio/numerics/vector.py",
"snippet": "class Float2(np.ndarray):\n \"\"\"\n Float2 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n assert len(args[0]) == 2, \"list/tuple must have 2 components\"\n arr = np.asarray([args[0][0], args[0][1]], dtype=np.float32).view(cls)\n elif isinstance(args[0], np.ndarray):\n assert len(args[0].squeeze().shape) == 1 and args[0].shape[0] == 2, \\\n \"numpy array must be sized [C=2] or [C=2, H=1, W=1]\"\n arr = np.asarray(args[0].squeeze(), dtype=np.float32).view(cls)\n elif torch.is_tensor(args[0]):\n assert len(args[0].squeeze().size()) == 1 and args[0].size(0) == 2, \\\n \"torch tensor must be sized [C=2] or [C=2, H=1, W=1]\"\n value = args[0].squeeze().float().cpu()\n arr = np.asarray([value[0].item(), value[1].item()], dtype=np.float32).view(cls)\n else:\n value = float(args[0])\n arr = np.asarray([value, value], dtype=np.float32).view(cls)\n elif len(args) == 2:\n arr = np.asarray(args, dtype=np.float32).view(cls)\n else: \n raise TypeError(\"Float2 only accepts 1 or 2 arguments.\")\n return arr\n\n def list(self) -> list:\n \"\"\"Returns values as Python list\"\"\"\n return [self[0], self[1]]\n\n def tuple(self) -> tuple:\n \"\"\"Returns values as Python tuple\"\"\"\n return (self[0], self[1])\n\n @property\n def x(self) -> float:\n return self[0]\n @x.setter\n def x(self, value):\n self[0] = value\n @property\n def y(self) -> float:\n return self[1]\n @y.setter\n def y(self, value):\n self[1] = value\n @property\n def r(self) -> float:\n return self[0]\n @r.setter\n def r(self, value):\n self[0] = value\n @property\n def g(self) -> float:\n return self[1]\n @g.setter\n def g(self, value):\n self[1] = value\n\n @staticmethod\n def zero():\n \"\"\"Returns numeric type filled with zero values\"\"\"\n return Float2(0., 0.)\n @staticmethod\n def one():\n \"\"\"Returns numeric type filled with one values\"\"\"\n return Float2(1., 1.)\n @staticmethod\n def x_axis():\n \"\"\"Returns numeric type with x-axis set to 1 and all others to 0\"\"\"\n return Float2(1., 0.)\n @staticmethod\n def y_axis():\n \"\"\"Returns numeric type with y-axis set to 1 and all others to 0\"\"\"\n return Float2(0., 1.)\n\n @property\n def xx(self): return Float2(self.x, self.x)\n @property\n def xy(self): return self\n @property\n def yx(self): return Float2(self.y, self.x)\n @property\n def yy(self): return Float2(self.y, self.y)\n\n @property\n def rr(self): return Float2(self.r, self.r)\n @property\n def rg(self): return self\n @property\n def gr(self): return Float2(self.g, self.r)\n @property\n def gg(self): return Float2(self.g, self.g)\n\n @property\n def xxx(self): return Float3(self.x, self.x, self.x)\n @property\n def xxy(self): return Float3(self.x, self.x, self.y)\n @property\n def xyx(self): return Float3(self.x, self.y, self.x)\n @property\n def xyy(self): return Float3(self.x, self.y, self.y)\n @property\n def yxx(self): return Float3(self.y, self.x, self.x)\n @property\n def yxy(self): return Float3(self.y, self.x, self.y)\n @property\n def yyx(self): return Float3(self.y, self.y, self.x)\n @property\n def yyy(self): return Float3(self.y, self.y, self.y)\n\n @property\n def rrr(self): return Float3(self.r, self.r, self.r)\n @property\n def rrg(self): return Float3(self.r, self.r, self.g)\n @property\n def rgr(self): return Float3(self.r, self.g, self.r)\n @property\n def rgg(self): return Float3(self.r, self.g, self.g)\n @property\n def grr(self): return Float3(self.g, self.r, self.r)\n 
@property\n def grg(self): return Float3(self.g, self.r, self.g)\n @property\n def ggr(self): return Float3(self.g, self.g, self.r)\n @property\n def ggg(self): return Float3(self.g, self.g, self.g)\n\n @property\n def xxxx(self): return Float4(self.x, self.x, self.x, self.x)\n @property\n def xxxy(self): return Float4(self.x, self.x, self.x, self.y)\n @property\n def xxyx(self): return Float4(self.x, self.x, self.y, self.x)\n @property\n def xxyy(self): return Float4(self.x, self.x, self.y, self.y)\n @property\n def xyxx(self): return Float4(self.x, self.y, self.x, self.x)\n @property\n def xyxy(self): return Float4(self.x, self.y, self.x, self.y)\n @property\n def xyyx(self): return Float4(self.x, self.y, self.y, self.x)\n @property\n def xyyy(self): return Float4(self.x, self.y, self.y, self.y)\n @property\n def yxxx(self): return Float4(self.y, self.x, self.x, self.x)\n @property\n def yxxy(self): return Float4(self.y, self.x, self.x, self.y)\n @property\n def yxyx(self): return Float4(self.y, self.x, self.y, self.x)\n @property\n def yxyy(self): return Float4(self.y, self.x, self.y, self.y)\n @property\n def yyxx(self): return Float4(self.y, self.y, self.x, self.x)\n @property\n def yyxy(self): return Float4(self.y, self.y, self.x, self.y)\n @property\n def yyyx(self): return Float4(self.y, self.y, self.y, self.x)\n @property\n def yyyy(self): return Float4(self.y, self.y, self.y, self.y)\n\n @property\n def rrrr(self): return Float4(self.r, self.r, self.r, self.r)\n @property\n def rrrg(self): return Float4(self.r, self.r, self.r, self.g)\n @property\n def rrgr(self): return Float4(self.r, self.r, self.g, self.r)\n @property\n def rrgg(self): return Float4(self.r, self.r, self.g, self.g)\n @property\n def rgrr(self): return Float4(self.r, self.g, self.r, self.r)\n @property\n def rgrg(self): return Float4(self.r, self.g, self.r, self.g)\n @property\n def rggr(self): return Float4(self.r, self.g, self.g, self.r)\n @property\n def rggg(self): return Float4(self.r, self.g, self.g, self.g)\n @property\n def grrr(self): return Float4(self.g, self.r, self.r, self.r)\n @property\n def grrg(self): return Float4(self.g, self.r, self.r, self.g)\n @property\n def grgr(self): return Float4(self.g, self.r, self.g, self.r)\n @property\n def grgg(self): return Float4(self.g, self.r, self.g, self.g)\n @property\n def ggrr(self): return Float4(self.g, self.g, self.r, self.r)\n @property\n def ggrg(self): return Float4(self.g, self.g, self.r, self.g)\n @property\n def gggr(self): return Float4(self.g, self.g, self.g, self.r)\n @property\n def gggg(self): return Float4(self.g, self.g, self.g, self.g)"
},
{
"identifier": "Float3",
"path": "src/tinycio/numerics/vector.py",
"snippet": "class Float3(np.ndarray):\n \"\"\"\n Float3 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n assert len(args[0]) == 3, \"list/tuple must have 3 components\"\n arr = np.asarray([args[0][0], args[0][1], args[0][2]], dtype=np.float32).view(cls)\n elif isinstance(args[0], np.ndarray):\n assert len(args[0].squeeze().shape) == 1 and args[0].shape[0] == 3, \\\n \"numpy array must be sized [C=3] or [C=3, H=1, W=1]\"\n arr = np.asarray(args[0].squeeze(), dtype=np.float32).view(cls)\n elif torch.is_tensor(args[0]):\n assert len(args[0].squeeze().size()) == 1 and args[0].size(0) == 3, \\\n \"torch tensor must be sized [C=3] or [C=3, H=1, W=1]\"\n value = args[0].squeeze().float().cpu()\n arr = np.asarray([value[0].item(), value[1].item(), value[2].item()], dtype=np.float32).view(cls)\n else:\n value = float(args[0])\n arr = np.asarray([value, value, value], dtype=np.float32).view(cls)\n elif len(args) == 3:\n arr = np.asarray(args, dtype=np.float32).view(cls)\n else: \n raise TypeError(\"Float3 only accepts 1 or 3 arguments.\")\n return arr\n\n def list(self) -> list:\n \"\"\"Returns values as Python list\"\"\"\n return [self[0], self[1], self[2]]\n\n def tuple(self) -> tuple:\n \"\"\"Returns values as Python tuple\"\"\"\n return (self[0], self[1], self[2])\n\n @property\n def x(self) -> float:\n return self[0]\n @x.setter\n def x(self, value):\n self[0] = value\n @property\n def y(self) -> float:\n return self[1]\n @y.setter\n def y(self, value):\n self[1] = value\n @property\n def z(self) -> float:\n return self[2]\n @z.setter\n def z(self, value):\n self[2] = value\n @property\n def r(self) -> float:\n return self[0]\n @r.setter\n def r(self, value):\n self[0] = value\n @property\n def g(self) -> float:\n return self[1]\n @g.setter\n def g(self, value):\n self[1] = value\n @property\n def b(self) -> float:\n return self[2]\n @b.setter\n def b(self, value):\n self[2] = value\n @staticmethod\n def zero():\n \"\"\"Returns numeric type filled with zero values\"\"\"\n return Float3(0., 0., 0.)\n @staticmethod\n def one():\n \"\"\"Returns numeric type filled with one values\"\"\"\n return Float3(1., 1., 1.)\n @staticmethod\n def x_axis():\n \"\"\"Returns numeric type with x-axis set to 1 and all others to 0\"\"\"\n return Float3(1., 0., 0.)\n @staticmethod\n def y_axis():\n \"\"\"Returns numeric type with y-axis set to 1 and all others to 0\"\"\"\n return Float3(0., 1., 0.)\n @staticmethod\n def z_axis():\n \"\"\"Returns numeric type with z-axis set to 1 and all others to 0\"\"\"\n return Float3(0., 0., 1.)\n\n @property\n def xx(self): return Float2(self.x, self.x)\n @property\n def xy(self): return Float2(self.x, self.y)\n @property\n def xz(self): return Float2(self.x, self.z)\n @property\n def yx(self): return Float2(self.y, self.x)\n @property\n def yy(self): return Float2(self.y, self.y)\n @property\n def yz(self): return Float2(self.y, self.z)\n @property\n def zx(self): return Float2(self.z, self.x)\n @property\n def zy(self): return Float2(self.z, self.y)\n @property\n def zz(self): return Float2(self.z, self.z)\n\n @property\n def rr(self): return Float2(self.r, self.r)\n @property\n def rg(self): return Float2(self.r, self.g)\n @property\n def rb(self): return Float2(self.r, self.b)\n @property\n def gr(self): return Float2(self.g, self.r)\n @property\n def gg(self): return Float2(self.g, self.g)\n @property\n def gb(self): return Float2(self.g, self.b)\n @property\n def br(self): 
return Float2(self.b, self.r)\n @property\n def bg(self): return Float2(self.b, self.g)\n @property\n def bb(self): return Float2(self.b, self.b)\n\n @property\n def xxx(self): return Float3(self.x, self.x, self.x)\n @property\n def xxy(self): return Float3(self.x, self.x, self.y)\n @property\n def xxz(self): return Float3(self.x, self.x, self.z)\n @property\n def xyx(self): return Float3(self.x, self.y, self.x)\n @property\n def xyy(self): return Float3(self.x, self.y, self.y)\n @property\n def xyz(self): return self\n @property\n def xzx(self): return Float3(self.x, self.z, self.x)\n @property\n def xzy(self): return Float3(self.x, self.z, self.y)\n @property\n def xzz(self): return Float3(self.x, self.z, self.z)\n @property\n def yxx(self): return Float3(self.y, self.x, self.x)\n @property\n def yxy(self): return Float3(self.y, self.x, self.y)\n @property\n def yxz(self): return Float3(self.y, self.x, self.z)\n @property\n def yyx(self): return Float3(self.y, self.y, self.x)\n @property\n def yyy(self): return Float3(self.y, self.y, self.y)\n @property\n def yyz(self): return Float3(self.y, self.y, self.z)\n @property\n def yzx(self): return Float3(self.y, self.z, self.x)\n @property\n def yzy(self): return Float3(self.y, self.z, self.y)\n @property\n def yzz(self): return Float3(self.y, self.z, self.z)\n @property\n def zxx(self): return Float3(self.z, self.x, self.x)\n @property\n def zxy(self): return Float3(self.z, self.x, self.y)\n @property\n def zxz(self): return Float3(self.z, self.x, self.z)\n @property\n def zyx(self): return Float3(self.z, self.y, self.x)\n @property\n def zyy(self): return Float3(self.z, self.y, self.y)\n @property\n def zyz(self): return Float3(self.z, self.y, self.z)\n @property\n def zzx(self): return Float3(self.z, self.z, self.x)\n @property\n def zzy(self): return Float3(self.z, self.z, self.y)\n @property\n def zzz(self): return Float3(self.z, self.z, self.z)\n\n @property\n def rrr(self): return Float3(self.r, self.r, self.r)\n @property\n def rrg(self): return Float3(self.r, self.r, self.g)\n @property\n def rrb(self): return Float3(self.r, self.r, self.b)\n @property\n def rgr(self): return Float3(self.r, self.g, self.r)\n @property\n def rgg(self): return Float3(self.r, self.g, self.g)\n @property\n def rgb(self): return self\n @property\n def rbr(self): return Float3(self.r, self.b, self.r)\n @property\n def rbg(self): return Float3(self.r, self.b, self.g)\n @property\n def rbb(self): return Float3(self.r, self.b, self.b)\n @property\n def grr(self): return Float3(self.g, self.r, self.r)\n @property\n def grg(self): return Float3(self.g, self.r, self.g)\n @property\n def grb(self): return Float3(self.g, self.r, self.b)\n @property\n def ggr(self): return Float3(self.g, self.g, self.r)\n @property\n def ggg(self): return Float3(self.g, self.g, self.g)\n @property\n def ggb(self): return Float3(self.g, self.g, self.b)\n @property\n def gbr(self): return Float3(self.g, self.b, self.r)\n @property\n def gbg(self): return Float3(self.g, self.b, self.g)\n @property\n def gbb(self): return Float3(self.g, self.b, self.b)\n @property\n def brr(self): return Float3(self.b, self.r, self.r)\n @property\n def brg(self): return Float3(self.b, self.r, self.g)\n @property\n def brb(self): return Float3(self.b, self.r, self.b)\n @property\n def bgr(self): return Float3(self.b, self.g, self.r)\n @property\n def bgg(self): return Float3(self.b, self.g, self.g)\n @property\n def bgb(self): return Float3(self.b, self.g, self.b)\n @property\n def bbr(self): return 
Float3(self.b, self.b, self.r)\n @property\n def bbg(self): return Float3(self.b, self.b, self.g)\n @property\n def bbb(self): return Float3(self.b, self.b, self.b)\n\n @property\n def xxxx(self): return Float4(self.x, self.x, self.x, self.x)\n @property\n def xxxy(self): return Float4(self.x, self.x, self.x, self.y)\n @property\n def xxxz(self): return Float4(self.x, self.x, self.x, self.z)\n @property\n def xxyx(self): return Float4(self.x, self.x, self.y, self.x)\n @property\n def xxyy(self): return Float4(self.x, self.x, self.y, self.y)\n @property\n def xxyz(self): return Float4(self.x, self.x, self.y, self.z)\n @property\n def xxzx(self): return Float4(self.x, self.x, self.z, self.x)\n @property\n def xxzy(self): return Float4(self.x, self.x, self.z, self.y)\n @property\n def xxzz(self): return Float4(self.x, self.x, self.z, self.z)\n @property\n def xyxx(self): return Float4(self.x, self.y, self.x, self.x)\n @property\n def xyxy(self): return Float4(self.x, self.y, self.x, self.y)\n @property\n def xyxz(self): return Float4(self.x, self.y, self.x, self.z)\n @property\n def xyyx(self): return Float4(self.x, self.y, self.y, self.x)\n @property\n def xyyy(self): return Float4(self.x, self.y, self.y, self.y)\n @property\n def xyyz(self): return Float4(self.x, self.y, self.y, self.z)\n @property\n def xyzx(self): return Float4(self.x, self.y, self.z, self.x)\n @property\n def xyzy(self): return Float4(self.x, self.y, self.z, self.y)\n @property\n def xyzz(self): return Float4(self.x, self.y, self.z, self.z)\n @property\n def xzxx(self): return Float4(self.x, self.z, self.x, self.x)\n @property\n def xzxy(self): return Float4(self.x, self.z, self.x, self.y)\n @property\n def xzxz(self): return Float4(self.x, self.z, self.x, self.z)\n @property\n def xzyx(self): return Float4(self.x, self.z, self.y, self.x)\n @property\n def xzyy(self): return Float4(self.x, self.z, self.y, self.y)\n @property\n def xzyz(self): return Float4(self.x, self.z, self.y, self.z)\n @property\n def xzzx(self): return Float4(self.x, self.z, self.z, self.x)\n @property\n def xzzy(self): return Float4(self.x, self.z, self.z, self.y)\n @property\n def xzzz(self): return Float4(self.x, self.z, self.z, self.z)\n @property\n def yxxx(self): return Float4(self.y, self.x, self.x, self.x)\n @property\n def yxxy(self): return Float4(self.y, self.x, self.x, self.y)\n @property\n def yxxz(self): return Float4(self.y, self.x, self.x, self.z)\n @property\n def yxyx(self): return Float4(self.y, self.x, self.y, self.x)\n @property\n def yxyy(self): return Float4(self.y, self.x, self.y, self.y)\n @property\n def yxyz(self): return Float4(self.y, self.x, self.y, self.z)\n @property\n def yxzx(self): return Float4(self.y, self.x, self.z, self.x)\n @property\n def yxzy(self): return Float4(self.y, self.x, self.z, self.y)\n @property\n def yxzz(self): return Float4(self.y, self.x, self.z, self.z)\n @property\n def yyxx(self): return Float4(self.y, self.y, self.x, self.x)\n @property\n def yyxy(self): return Float4(self.y, self.y, self.x, self.y)\n @property\n def yyxz(self): return Float4(self.y, self.y, self.x, self.z)\n @property\n def yyyx(self): return Float4(self.y, self.y, self.y, self.x)\n @property\n def yyyy(self): return Float4(self.y, self.y, self.y, self.y)\n @property\n def yyyz(self): return Float4(self.y, self.y, self.y, self.z)\n @property\n def yyzx(self): return Float4(self.y, self.y, self.z, self.x)\n @property\n def yyzy(self): return Float4(self.y, self.y, self.z, self.y)\n @property\n def yyzz(self): return 
Float4(self.y, self.y, self.z, self.z)\n @property\n def yzxx(self): return Float4(self.y, self.z, self.x, self.x)\n @property\n def yzxy(self): return Float4(self.y, self.z, self.x, self.y)\n @property\n def yzxz(self): return Float4(self.y, self.z, self.x, self.z)\n @property\n def yzyx(self): return Float4(self.y, self.z, self.y, self.x)\n @property\n def yzyy(self): return Float4(self.y, self.z, self.y, self.y)\n @property\n def yzyz(self): return Float4(self.y, self.z, self.y, self.z)\n @property\n def yzzx(self): return Float4(self.y, self.z, self.z, self.x)\n @property\n def yzzy(self): return Float4(self.y, self.z, self.z, self.y)\n @property\n def yzzz(self): return Float4(self.y, self.z, self.z, self.z)\n @property\n def zxxx(self): return Float4(self.z, self.x, self.x, self.x)\n @property\n def zxxy(self): return Float4(self.z, self.x, self.x, self.y)\n @property\n def zxxz(self): return Float4(self.z, self.x, self.x, self.z)\n @property\n def zxyx(self): return Float4(self.z, self.x, self.y, self.x)\n @property\n def zxyy(self): return Float4(self.z, self.x, self.y, self.y)\n @property\n def zxyz(self): return Float4(self.z, self.x, self.y, self.z)\n @property\n def zxzx(self): return Float4(self.z, self.x, self.z, self.x)\n @property\n def zxzy(self): return Float4(self.z, self.x, self.z, self.y)\n @property\n def zxzz(self): return Float4(self.z, self.x, self.z, self.z)\n @property\n def zyxx(self): return Float4(self.z, self.y, self.x, self.x)\n @property\n def zyxy(self): return Float4(self.z, self.y, self.x, self.y)\n @property\n def zyxz(self): return Float4(self.z, self.y, self.x, self.z)\n @property\n def zyyx(self): return Float4(self.z, self.y, self.y, self.x)\n @property\n def zyyy(self): return Float4(self.z, self.y, self.y, self.y)\n @property\n def zyyz(self): return Float4(self.z, self.y, self.y, self.z)\n @property\n def zyzx(self): return Float4(self.z, self.y, self.z, self.x)\n @property\n def zyzy(self): return Float4(self.z, self.y, self.z, self.y)\n @property\n def zyzz(self): return Float4(self.z, self.y, self.z, self.z)\n @property\n def zzxx(self): return Float4(self.z, self.z, self.x, self.x)\n @property\n def zzxy(self): return Float4(self.z, self.z, self.x, self.y)\n @property\n def zzxz(self): return Float4(self.z, self.z, self.x, self.z)\n @property\n def zzyx(self): return Float4(self.z, self.z, self.y, self.x)\n @property\n def zzyy(self): return Float4(self.z, self.z, self.y, self.y)\n @property\n def zzyz(self): return Float4(self.z, self.z, self.y, self.z)\n @property\n def zzzx(self): return Float4(self.z, self.z, self.z, self.x)\n @property\n def zzzy(self): return Float4(self.z, self.z, self.z, self.y)\n @property\n def zzzz(self): return Float4(self.z, self.z, self.z, self.z)\n\n @property\n def rrrr(self): return Float4(self.r, self.r, self.r, self.r)\n @property\n def rrrg(self): return Float4(self.r, self.r, self.r, self.g)\n @property\n def rrrb(self): return Float4(self.r, self.r, self.r, self.b)\n @property\n def rrgr(self): return Float4(self.r, self.r, self.g, self.r)\n @property\n def rrgg(self): return Float4(self.r, self.r, self.g, self.g)\n @property\n def rrgb(self): return Float4(self.r, self.r, self.g, self.b)\n @property\n def rrbr(self): return Float4(self.r, self.r, self.b, self.r)\n @property\n def rrbg(self): return Float4(self.r, self.r, self.b, self.g)\n @property\n def rrbb(self): return Float4(self.r, self.r, self.b, self.b)\n @property\n def rgrr(self): return Float4(self.r, self.g, self.r, self.r)\n @property\n def 
rgrg(self): return Float4(self.r, self.g, self.r, self.g)\n @property\n def rgrb(self): return Float4(self.r, self.g, self.r, self.b)\n @property\n def rggr(self): return Float4(self.r, self.g, self.g, self.r)\n @property\n def rggg(self): return Float4(self.r, self.g, self.g, self.g)\n @property\n def rggb(self): return Float4(self.r, self.g, self.g, self.b)\n @property\n def rgbr(self): return Float4(self.r, self.g, self.b, self.r)\n @property\n def rgbg(self): return Float4(self.r, self.g, self.b, self.g)\n @property\n def rgbb(self): return Float4(self.r, self.g, self.b, self.b)\n @property\n def rbrr(self): return Float4(self.r, self.b, self.r, self.r)\n @property\n def rbrg(self): return Float4(self.r, self.b, self.r, self.g)\n @property\n def rbrb(self): return Float4(self.r, self.b, self.r, self.b)\n @property\n def rbgr(self): return Float4(self.r, self.b, self.g, self.r)\n @property\n def rbgg(self): return Float4(self.r, self.b, self.g, self.g)\n @property\n def rbgb(self): return Float4(self.r, self.b, self.g, self.b)\n @property\n def rbbr(self): return Float4(self.r, self.b, self.b, self.r)\n @property\n def rbbg(self): return Float4(self.r, self.b, self.b, self.g)\n @property\n def rbbb(self): return Float4(self.r, self.b, self.b, self.b)\n @property\n def grrr(self): return Float4(self.g, self.r, self.r, self.r)\n @property\n def grrg(self): return Float4(self.g, self.r, self.r, self.g)\n @property\n def grrb(self): return Float4(self.g, self.r, self.r, self.b)\n @property\n def grgr(self): return Float4(self.g, self.r, self.g, self.r)\n @property\n def grgg(self): return Float4(self.g, self.r, self.g, self.g)\n @property\n def grgb(self): return Float4(self.g, self.r, self.g, self.b)\n @property\n def grbr(self): return Float4(self.g, self.r, self.b, self.r)\n @property\n def grbg(self): return Float4(self.g, self.r, self.b, self.g)\n @property\n def grbb(self): return Float4(self.g, self.r, self.b, self.b)\n @property\n def ggrr(self): return Float4(self.g, self.g, self.r, self.r)\n @property\n def ggrg(self): return Float4(self.g, self.g, self.r, self.g)\n @property\n def ggrb(self): return Float4(self.g, self.g, self.r, self.b)\n @property\n def gggr(self): return Float4(self.g, self.g, self.g, self.r)\n @property\n def gggg(self): return Float4(self.g, self.g, self.g, self.g)\n @property\n def gggb(self): return Float4(self.g, self.g, self.g, self.b)\n @property\n def ggbr(self): return Float4(self.g, self.g, self.b, self.r)\n @property\n def ggbg(self): return Float4(self.g, self.g, self.b, self.g)\n @property\n def ggbb(self): return Float4(self.g, self.g, self.b, self.b)\n @property\n def gbrr(self): return Float4(self.g, self.b, self.r, self.r)\n @property\n def gbrg(self): return Float4(self.g, self.b, self.r, self.g)\n @property\n def gbrb(self): return Float4(self.g, self.b, self.r, self.b)\n @property\n def gbgr(self): return Float4(self.g, self.b, self.g, self.r)\n @property\n def gbgg(self): return Float4(self.g, self.b, self.g, self.g)\n @property\n def gbgb(self): return Float4(self.g, self.b, self.g, self.b)\n @property\n def gbbr(self): return Float4(self.g, self.b, self.b, self.r)\n @property\n def gbbg(self): return Float4(self.g, self.b, self.b, self.g)\n @property\n def gbbb(self): return Float4(self.g, self.b, self.b, self.b)\n @property\n def brrr(self): return Float4(self.b, self.r, self.r, self.r)\n @property\n def brrg(self): return Float4(self.b, self.r, self.r, self.g)\n @property\n def brrb(self): return Float4(self.b, self.r, self.r, 
self.b)\n @property\n def brgr(self): return Float4(self.b, self.r, self.g, self.r)\n @property\n def brgg(self): return Float4(self.b, self.r, self.g, self.g)\n @property\n def brgb(self): return Float4(self.b, self.r, self.g, self.b)\n @property\n def brbr(self): return Float4(self.b, self.r, self.b, self.r)\n @property\n def brbg(self): return Float4(self.b, self.r, self.b, self.g)\n @property\n def brbb(self): return Float4(self.b, self.r, self.b, self.b)\n @property\n def bgrr(self): return Float4(self.b, self.g, self.r, self.r)\n @property\n def bgrg(self): return Float4(self.b, self.g, self.r, self.g)\n @property\n def bgrb(self): return Float4(self.b, self.g, self.r, self.b)\n @property\n def bggr(self): return Float4(self.b, self.g, self.g, self.r)\n @property\n def bggg(self): return Float4(self.b, self.g, self.g, self.g)\n @property\n def bggb(self): return Float4(self.b, self.g, self.g, self.b)\n @property\n def bgbr(self): return Float4(self.b, self.g, self.b, self.r)\n @property\n def bgbg(self): return Float4(self.b, self.g, self.b, self.g)\n @property\n def bgbb(self): return Float4(self.b, self.g, self.b, self.b)\n @property\n def bbrr(self): return Float4(self.b, self.b, self.r, self.r)\n @property\n def bbrg(self): return Float4(self.b, self.b, self.r, self.g)\n @property\n def bbrb(self): return Float4(self.b, self.b, self.r, self.b)\n @property\n def bbgr(self): return Float4(self.b, self.b, self.g, self.r)\n @property\n def bbgg(self): return Float4(self.b, self.b, self.g, self.g)\n @property\n def bbgb(self): return Float4(self.b, self.b, self.g, self.b)\n @property\n def bbbr(self): return Float4(self.b, self.b, self.b, self.r)\n @property\n def bbbg(self): return Float4(self.b, self.b, self.b, self.g)\n @property\n def bbbb(self): return Float4(self.b, self.b, self.b, self.b)"
}
] | import typing
import torch
import numpy as np
from typing import Union
from ..numerics import Float2, Float3 | 9,916 | from __future__ import annotations
def srgb_luminance(im_srgb:Union[torch.Tensor, ColorImage]) -> torch.Tensor:
"""
Return relative luminance of linear sRGB image.
:param im_srgb: [C=3, H, W] color image tensor in sRGB color space
:type im_srgb: torch.Tensor | ColorImage
:return: [C=1, H, W] image tensor
"""
lum_r, lum_g, lum_b = 0.2126, 0.7152, 0.0722
return lum_r * im_srgb[0:1,...] + lum_g * im_srgb[1:2,...] + lum_b * im_srgb[2:3,...]
def apply_gamma(im:Union[torch.Tensor, ColorImage], gamma:float) -> torch.Tensor:
"""
Apply arbitrary gamma correction.
:param im: Image tensor
:type im: torch.Tensor | ColorImage
:param gamma: Gamma correction (should be in the range [0.1, 10.0])
:return: Gamma-corrected image tensor
"""
if gamma == 1.: return im
assert 0.1 <= gamma <= 10.0, "gamma value should be in range [0.1, 10.0]"
    im = torch.pow(im, gamma)
    return im
def apply_hue_oklab(im_oklab:Union[torch.Tensor, ColorImage], hue_delta:float) -> torch.Tensor:
"""
Manually shift hue of an image by a -1 to +1 delta value.
:param im_oklab: Image tensor in OKLAB color space
:type im_oklab: torch.Tensor | ColorImage
:param hue_delta: Hue shift value in the range [-1., 1.]
:return: Image tensor in OKLAB color space with adjusted hue
"""
assert -1. <= hue_delta <= 1., "hue_delta value should be in range [-1., 1.]"
L, a, b = im_oklab[0:1], im_oklab[1:2], im_oklab[2:3]
hue_delta = ((hue_delta * 0.5) % 1.) * 2. * torch.pi
# Calculate angle and magnitude in the a-b plane
angle = torch.atan2(b, a)
magnitude = torch.sqrt(a**2 + b**2)
# Apply hue correction
angle += hue_delta
# Convert back to Cartesian coordinates
a_corrected = magnitude * torch.cos(angle)
b_corrected = magnitude * torch.sin(angle)
corrected = torch.cat([L, a_corrected, b_corrected], dim=0)
return corrected
| from __future__ import annotations
def srgb_luminance(im_srgb:Union[torch.Tensor, ColorImage]) -> torch.Tensor:
"""
Return relative luminance of linear sRGB image.
:param im_srgb: [C=3, H, W] color image tensor in sRGB color space
:type im_srgb: torch.Tensor | ColorImage
:return: [C=1, H, W] image tensor
"""
lum_r, lum_g, lum_b = 0.2126, 0.7152, 0.0722
return lum_r * im_srgb[0:1,...] + lum_g * im_srgb[1:2,...] + lum_b * im_srgb[2:3,...]
def apply_gamma(im:Union[torch.Tensor, ColorImage], gamma:float) -> torch.Tensor:
"""
Apply arbitrary gamma correction.
:param im: Image tensor
:type im: torch.Tensor | ColorImage
:param gamma: Gamma correction (should be in the range [0.1, 10.0])
:return: Gamma-corrected image tensor
"""
if gamma == 1.: return im
assert 0.1 <= gamma <= 10.0, "gamma value should be in range [0.1, 10.0]"
    im = torch.pow(im, gamma)
    return im
def apply_hue_oklab(im_oklab:Union[torch.Tensor, ColorImage], hue_delta:float) -> torch.Tensor:
"""
Manually shift hue of an image by a -1 to +1 delta value.
:param im_oklab: Image tensor in OKLAB color space
:type im_oklab: torch.Tensor | ColorImage
:param hue_delta: Hue shift value in the range [-1., 1.]
:return: Image tensor in OKLAB color space with adjusted hue
"""
assert -1. <= hue_delta <= 1., "hue_delta value should be in range [-1., 1.]"
L, a, b = im_oklab[0:1], im_oklab[1:2], im_oklab[2:3]
hue_delta = ((hue_delta * 0.5) % 1.) * 2. * torch.pi
# Calculate angle and magnitude in the a-b plane
angle = torch.atan2(b, a)
magnitude = torch.sqrt(a**2 + b**2)
# Apply hue correction
angle += hue_delta
# Convert back to Cartesian coordinates
a_corrected = magnitude * torch.cos(angle)
b_corrected = magnitude * torch.sin(angle)
corrected = torch.cat([L, a_corrected, b_corrected], dim=0)
return corrected
| def col_hsv_to_rgb(hsv:Union[Float3, Color]) -> Float3: | 1 | 2023-12-15 15:39:08+00:00 | 12k |
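The tinycio row above defines three helpers (srgb_luminance, apply_gamma, apply_hue_oklab). A small self-contained usage sketch with plain torch.Tensor inputs; the bodies are restated from the row so the example runs without importing tinycio, and the random input image is purely illustrative:

import torch

def srgb_luminance(im_srgb: torch.Tensor) -> torch.Tensor:
    # Rec. 709 luma weights, as in the row's code
    lum_r, lum_g, lum_b = 0.2126, 0.7152, 0.0722
    return lum_r * im_srgb[0:1] + lum_g * im_srgb[1:2] + lum_b * im_srgb[2:3]

def apply_gamma(im: torch.Tensor, gamma: float) -> torch.Tensor:
    if gamma == 1.: return im
    assert 0.1 <= gamma <= 10.0, "gamma value should be in range [0.1, 10.0]"
    return torch.pow(im, gamma)

def apply_hue_oklab(im_oklab: torch.Tensor, hue_delta: float) -> torch.Tensor:
    assert -1. <= hue_delta <= 1., "hue_delta value should be in range [-1., 1.]"
    L, a, b = im_oklab[0:1], im_oklab[1:2], im_oklab[2:3]
    hue_delta = ((hue_delta * 0.5) % 1.) * 2. * torch.pi
    angle = torch.atan2(b, a) + hue_delta            # rotate the hue angle in the a-b plane
    magnitude = torch.sqrt(a ** 2 + b ** 2)
    return torch.cat([L, magnitude * torch.cos(angle), magnitude * torch.sin(angle)], dim=0)

im = torch.rand(3, 8, 8)                             # [C=3, H, W] linear sRGB, random for illustration
print(srgb_luminance(im).shape)                      # torch.Size([1, 8, 8])
print(apply_gamma(im, 2.2).shape)                    # gamma-corrected image, same shape
print(apply_hue_oklab(torch.rand(3, 8, 8), 0.25).shape)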
quocanh34/magic-animate-modified | magicanimate/models/unet_controlnet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, 
encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n 
encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "magicanimate/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from magicanimate.models.unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint | 9,105 | # up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
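        # Build one decoder stage per entry in `up_block_types`; channels run in the
        # reverse order of the encoder, and the final stage adds no upsampler.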
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
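                Example (usage sketch; `unet` is a hypothetical model instance)::

                    unet.set_attention_slice("auto")  # halve each attention head dim
                    unet.set_attention_slice("max")   # compute one slice at a time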
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
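            # `res` is the downsampling factor at this stage; a motion module is only
            # inserted when it appears in `motion_module_resolutions`.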
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
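        # Build one decoder stage per entry in `up_block_types`; channels run in the
        # reverse order of the encoder, and the final stage adds no upsampler.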
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
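                Example (usage sketch; `unet` is a hypothetical model instance)::

                    unet.set_attention_slice("auto")  # halve each attention head dim
                    unet.set_attention_slice("max")   # compute one slice at a time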
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 4 | 2023-12-15 01:22:37+00:00 | 12k |
cvlab-yonsei/RankMixup | tools/train_net.py | [
{
"identifier": "Trainer",
"path": "calibrate/engine/trainer.py",
"snippet": "class Trainer:\n def __init__(self, cfg: DictConfig) -> None:\n self.cfg = cfg\n self.work_dir = self.cfg.work_dir\n self.device = torch.device(self.cfg.device)\n self.build_data_loader()\n self.build_model()\n self.build_solver()\n self.build_meter()\n self.init_wandb_or_not()\n\n def build_data_loader(self) -> None:\n # data pipeline\n self.train_loader, self.val_loader = instantiate(self.cfg.data.object.trainval)\n logger.info(\"Data pipeline initialized\")\n\n def build_model(self) -> None:\n # network\n self.model = instantiate(self.cfg.model.object)\n self.model.to(self.device)\n if hasattr(self.cfg.loss, 'num_classes'):\n self.cfg.loss.num_classes = self.cfg.model.num_classes\n self.loss_func = instantiate(self.cfg.loss.object)\n self.loss_func.to(self.device)\n logger.info(self.loss_func)\n logger.info(\"Model initialized\")\n self.mixup = self.cfg.train.mixup\n\n def build_solver(self) -> None:\n # build solver\n parameters = [\n {\"params\": self.model.parameters(), \"lr\": self.cfg.optim.lr},\n ]\n if self.cfg.optim.name == 'sgd':\n self.optimizer = torch.optim.SGD(parameters, momentum=self.cfg.optim.momentum, weight_decay=self.cfg.optim.weight_decay)\n else:\n raise NotImplementedError\n self.scheduler = instantiate(\n self.cfg.scheduler.object, self.optimizer\n )\n logger.info(\"Solver initialized\")\n\n def init_wandb_or_not(self) -> None:\n if self.cfg.wandb.enable:\n wandb.init(\n project=self.cfg.wandb.project,\n entity=self.cfg.wandb.entity,\n config=OmegaConf.to_container(self.cfg, resolve=True),\n tags=[\"train\"],\n )\n wandb.run.name = \"{}-{}-{}\".format(\n wandb.run.id, self.cfg.model.name, self.cfg.loss.name\n )\n wandb.run.save()\n wandb.watch(self.model, log=None)\n logger.info(\"Wandb initialized : {}\".format(wandb.run.name))\n\n def start_or_resume(self):\n if self.cfg.train.resume:\n self.start_epoch, self.best_epoch, self.best_score = (\n load_train_checkpoint(\n self.work_dir, self.device, self.model,\n optimizer=self.optimizer,\n scheduler=self.scheduler\n )\n )\n else:\n self.start_epoch, self.best_epoch, self.best_score = 0, -1, None\n self.max_epoch = self.cfg.train.max_epoch\n\n def build_meter(self):\n self.batch_time_meter = AverageMeter()\n self.data_time_meter = AverageMeter()\n self.num_classes = self.cfg.model.num_classes\n if hasattr(self.loss_func, \"names\"):\n self.loss_meter = LossMeter(\n num_terms=len(self.loss_func.names),\n names=self.loss_func.names\n )\n else:\n self.loss_meter = LossMeter()\n if self.cfg.data.name=='cifar10_lt' or self.cfg.data.name=='cifar100_lt':\n self.evaluator = LT_ClassificationEvaluator(self.num_classes)\n else:\n self.evaluator = ClassificationEvaluator(self.num_classes)\n self.calibrate_evaluator = CalibrateEvaluator(\n self.num_classes,\n num_bins=self.cfg.calibrate.num_bins,\n device=self.device,\n )\n self.logits_evaluator = LogitsEvaluator()\n # self.probs_evaluator = ProbsEvaluator(self.num_classes)\n\n def reset_meter(self):\n self.batch_time_meter.reset()\n self.data_time_meter.reset()\n self.loss_meter.reset()\n self.evaluator.reset()\n self.calibrate_evaluator.reset()\n self.logits_evaluator.reset()\n\n def log_iter_info(self, iter, max_iter, epoch, phase=\"Train\"):\n log_dict = {}\n log_dict[\"data_time\"] = self.data_time_meter.val\n log_dict[\"batch_time\"] = self.batch_time_meter.val\n log_dict.update(self.loss_meter.get_vals())\n log_dict.update(self.evaluator.curr_score())\n log_dict.update(self.logits_evaluator.curr_score())\n # log_dict.update(self.probs_evaluator.curr_score())\n 
logger.info(\"{} Iter[{}/{}][{}]\\t{}\".format(\n phase, iter + 1, max_iter, epoch + 1,\n json.dumps(round_dict(log_dict))\n ))\n if self.cfg.wandb.enable and phase.lower() == \"train\":\n wandb_log_dict = {\"iter\": epoch * max_iter + iter}\n wandb_log_dict.update(dict(\n (\"{}/Iter/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb.log(wandb_log_dict)\n\n def log_epoch_info(self, epoch, phase=\"Train\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n log_dict[\"lr\"] = get_lr(self.optimizer)\n log_dict.update(self.loss_meter.get_avgs())\n if isinstance(self.loss_func, LogitMarginL1):\n log_dict[\"alpha\"] = self.loss_func.alpha\n metric, table_data = self.evaluator.mean_score(print=False)\n log_dict.update(metric)\n log_dict.update(self.logits_evaluator.mean_score())\n # log_dict.update(self.probs_evaluator.mean_score())\n logger.info(\"{} Epoch[{}]\\t{}\".format(\n phase, epoch + 1, json.dumps(round_dict(log_dict))\n ))\n if self.cfg.wandb.enable:\n wandb_log_dict = {\"epoch\": epoch}\n wandb_log_dict.update(dict(\n (\"{}/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n if phase.lower() != \"train\":\n wandb_log_dict[\"{}/score_table\".format(phase)] = wandb.Table(\n columns=table_data[0], data=table_data[1:]\n )\n wandb.log(wandb_log_dict)\n\n def log_eval_epoch_info(self, epoch, phase=\"Val\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n log_dict.update(self.loss_meter.get_avgs())\n classify_metric, classify_table_data = self.evaluator.mean_score(print=False)\n log_dict.update(classify_metric)\n calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)\n log_dict.update(calibrate_metric)\n log_dict.update(self.logits_evaluator.mean_score())\n # log_dict.update(self.probs_evaluator.mean_score())\n logger.info(\"{} Epoch[{}]\\t{}\".format(\n phase, epoch + 1, json.dumps(round_dict(log_dict))\n ))\n logger.info(\"\\n\" + AsciiTable(classify_table_data).table)\n logger.info(\"\\n\" + AsciiTable(calibrate_table_data).table)\n if self.cfg.wandb.enable:\n wandb_log_dict = {\"epoch\": epoch}\n wandb_log_dict.update(dict(\n (\"{}/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb_log_dict[\"{}/classify_score_table\".format(phase)] = (\n wandb.Table(\n columns=classify_table_data[0],\n data=classify_table_data[1:]\n )\n )\n wandb_log_dict[\"{}/calibrate_score_table\".format(phase)] = (\n wandb.Table(\n columns=calibrate_table_data[0],\n data=calibrate_table_data[1:]\n )\n )\n if \"test\" in phase.lower() and self.cfg.calibrate.visualize:\n fig_reliab, fig_hist = self.calibrate_evaluator.plot_reliability_diagram()\n wandb_log_dict[\"{}/calibrate_reliability\".format(phase)] = fig_reliab\n wandb_log_dict[\"{}/confidence_histogram\".format(phase)] = fig_hist\n wandb.log(wandb_log_dict)\n\n def mixup_data(self, x, y, alpha=1.0, use_cuda=True):\n import numpy as np\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n \n return mixed_x, y_a, y_b, lam\n\n\n def mixup_criterion(self, criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\n \n \n def train_epoch(self, epoch: int):\n 
self.reset_meter()\n self.model.train()\n\n if self.cfg.data.name=='cifar10_lt' or self.cfg.data.name=='cifar100_lt':\n class_num = torch.zeros(self.num_classes).cuda()\n correct = torch.zeros(self.num_classes).cuda()\n\n max_iter = len(self.train_loader)\n # max_iter = len(self.val_loader)\n\n end = time.time()\n for i, (inputs, labels) in enumerate(self.train_loader): #self.train_loader\n # compute the time for data loading\n self.data_time_meter.update(time.time() - end)\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n if self.mixup: \n outputs = self.model(inputs)\n mixup, target_re, lam = self.model.forward_multimixup(inputs, labels)\n\n loss = self.loss_func(outputs, labels, mixup, target_re, lam)\n else:\n outputs = self.model(inputs)\n loss = self.loss_func(outputs, labels) \n \n if isinstance(loss, tuple):\n loss_total = loss[0]\n else:\n loss_total = loss\n # backward\n self.optimizer.zero_grad()\n loss_total.backward()\n if self.cfg.train.clip_grad_norm:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 2)\n self.optimizer.step()\n # metric\n self.loss_meter.update(loss, inputs.size(0))\n predicts = F.softmax(outputs, dim=1)\n if self.cfg.data.name=='cifar10_lt' or self.cfg.data.name=='cifar100_lt':\n _, predicted = predicts.max(1)\n target_one_hot = F.one_hot(labels, self.num_classes)\n predict_one_hot = F.one_hot(predicted, self.num_classes)\n class_num = class_num + target_one_hot.sum(dim=0).to(torch.float)\n correct = correct + (target_one_hot + predict_one_hot == 2).sum(dim=0).to(torch.float)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels), to_numpy(correct), to_numpy(class_num),\n self.cfg.data.head_class_idx, self.cfg.data.med_class_idx, self.cfg.data.tail_class_idx \n )\n else:\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n # pred_labels = torch.argmax(predicts, dim=1)\n self.logits_evaluator.update(to_numpy(outputs))\n # self.probs_evaluator.update(to_numpy(predicts))\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch)\n end = time.time()\n self.log_epoch_info(epoch)\n\n @torch.no_grad()\n def eval_epoch(\n self, data_loader, epoch,\n phase=\"Val\",\n temp=1.0,\n post_temp=False\n ):\n self.reset_meter()\n self.model.eval()\n\n if self.cfg.data.name=='cifar10_lt' or self.cfg.data.name=='cifar100_lt':\n class_num = torch.zeros(self.num_classes).cuda()\n correct = torch.zeros(self.num_classes).cuda()\n max_iter = len(data_loader)\n end = time.time()\n \n for i, (inputs, labels) in enumerate(data_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n if self.mixup: \n outputs = self.model(inputs)\n mixup, target_re, lam = self.model.forward_multimixup(inputs, labels)\n\n loss = self.loss_func(outputs, labels, mixup, target_re, lam)\n else:\n outputs = self.model(inputs)\n loss = self.loss_func(outputs, labels) \n\n # metric\n self.loss_meter.update(loss)\n self.calibrate_evaluator.update(outputs, labels)\n self.logits_evaluator.update(to_numpy(outputs))\n predicts = F.softmax(outputs, dim=1) \n if self.cfg.data.name=='cifar10_lt' or self.cfg.data.name=='cifar100_lt':\n _, predicted = predicts.max(1)\n target_one_hot = F.one_hot(labels, self.num_classes)\n predict_one_hot = F.one_hot(predicted, self.num_classes)\n class_num = class_num + target_one_hot.sum(dim=0).to(torch.float)\n correct = correct + (target_one_hot + predict_one_hot == 
2).sum(dim=0).to(torch.float)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels), to_numpy(correct), to_numpy(class_num),\n self.cfg.data.head_class_idx, self.cfg.data.med_class_idx, self.cfg.data.tail_class_idx \n )\n else:\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n # logging\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch, phase)\n end = time.time()\n if hasattr(self.loss_func, 'margin'):\n logger.info(self.loss_func.margin)\n self.log_eval_epoch_info(epoch, phase)\n\n return self.loss_meter.avg(0), self.evaluator.mean_score(all_metric=False)[0]\n\n def train(self):\n self.start_or_resume()\n logger.info(\n \"Everything is perfect so far. Let's start training. Good luck!\"\n )\n\n for epoch in range(self.start_epoch, self.max_epoch):\n logger.info(\"=\" * 20)\n logger.info(\" Start epoch {}\".format(epoch + 1))\n logger.info(\"=\" * 20)\n self.train_epoch(epoch)\n val_loss, val_score = self.eval_epoch(self.val_loader, epoch, phase=\"Val\")\n # run lr scheduler\n self.scheduler.step()\n if isinstance(self.loss_func, (LogitMarginL1)):\n self.loss_func.schedule_alpha(epoch)\n if self.best_score is None or val_score > self.best_score:\n self.best_score, self.best_epoch = val_score, epoch\n best_checkpoint = True\n else:\n best_checkpoint = False\n save_checkpoint(\n self.work_dir, self.model, self.loss_func, self.optimizer, self.scheduler,\n epoch=epoch,\n best_checkpoint=best_checkpoint,\n val_score=val_score,\n keep_checkpoint_num=self.cfg.train.keep_checkpoint_num,\n keep_checkpoint_interval=self.cfg.train.keep_checkpoint_interval\n )\n # logging best performance on val so far\n logger.info(\n \"Epoch[{}]\\tBest {} on Val : {:.4f} at epoch {}\".format(\n epoch + 1, self.evaluator.main_metric(),\n self.best_score, self.best_epoch + 1\n )\n )\n if self.cfg.wandb.enable and best_checkpoint:\n wandb.log({\n \"epoch\": epoch,\n \"Val/best_epoch\": self.best_epoch,\n \"Val/best_{}\".format(self.evaluator.main_metric()): self.best_score,\n \"Val/best_classify_score_table\": self.evaluator.wandb_score_table(),\n \"Val/best_calibrate_score_table\": self.calibrate_evaluator.wandb_score_table()\n })\n if self.cfg.wandb.enable:\n copyfile(\n osp.join(self.work_dir, \"best.pth\"),\n osp.join(self.work_dir, \"{}-best.pth\".format(wandb.run.name))\n )\n\n def post_temperature(self):\n model_with_temp = ModelWithTemperature(self.model, device=self.device)\n model_with_temp.set_temperature(self.val_loader)\n temp = model_with_temp.get_temperature()\n if self.cfg.wandb.enable:\n wandb.log({\n \"temperature\": temp\n })\n return temp\n\n def test(self):\n logger.info(\"We are almost done : final testing ...\")\n self.test_loader = instantiate(self.cfg.data.object.test)\n # test best pth\n epoch = self.best_epoch\n logger.info(\"#################\")\n logger.info(\" Test at best epoch {}\".format(epoch + 1))\n logger.info(\"#################\")\n logger.info(\"Best epoch[{}] :\".format(epoch + 1))\n load_checkpoint(\n osp.join(self.work_dir, \"best.pth\"), self.model, self.device\n )\n self.eval_epoch(self.test_loader, epoch, phase=\"Test\")\n temp = self.post_temperature()\n self.eval_epoch(self.test_loader, epoch, phase=\"TestPT\", temp=temp, post_temp=True)\n\n def run(self):\n self.train()\n self.test()"
},
{
"identifier": "SegmentTrainer",
"path": "calibrate/engine/segement_trainer.py",
"snippet": "class SegmentTrainer(Trainer):\n def __init__(self, cfg: DictConfig) -> None:\n super().__init__(cfg)\n\n def build_meter(self):\n self.batch_time_meter = AverageMeter()\n self.data_time_meter = AverageMeter()\n self.num_classes = self.cfg.model.num_classes\n if hasattr(self.loss_func, \"names\"):\n self.loss_meter = LossMeter(\n num_terms=len(self.loss_func.names),\n names=self.loss_func.names\n )\n else:\n self.loss_meter = LossMeter()\n self.evaluator = SegmentEvaluator(\n self.train_loader.dataset.classes,\n ignore_index=255\n )\n self.calibrate_evaluator = SegmentCalibrateEvaluator(\n self.num_classes,\n num_bins=self.cfg.calibrate.num_bins,\n ignore_index=255,\n device=self.device\n )\n # self.logits_evaluator = SegmentLogitsEvaluator(ignore_index=255)\n\n def reset_meter(self):\n self.batch_time_meter.reset()\n self.data_time_meter.reset()\n self.loss_meter.reset()\n self.evaluator.reset()\n # self.logits_evaluator.reset()\n\n def log_iter_info(self, iter, max_iter, epoch, phase=\"Train\"):\n log_dict = {}\n log_dict[\"data_time\"] = self.data_time_meter.val\n log_dict[\"batch_time\"] = self.batch_time_meter.val\n log_dict.update(self.loss_meter.get_vals())\n log_dict.update(self.evaluator.curr_score())\n # log_dict.update(self.logits_evaluator.curr_score())\n # log_dict.update(self.probs_evaluator.curr_score())\n logger.info(\"{} Iter[{}/{}][{}]\\t{}\".format(\n phase, iter + 1, max_iter, epoch + 1,\n json.dumps(round_dict(log_dict))\n ))\n if self.cfg.wandb.enable and phase.lower() == \"train\":\n wandb_log_dict = {\"iter\": epoch * max_iter + iter}\n wandb_log_dict.update(dict(\n (\"{}/Iter/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb.log(wandb_log_dict)\n\n def log_epoch_info(self, epoch, phase=\"Train\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n log_dict[\"lr\"] = get_lr(self.optimizer)\n log_dict.update(self.loss_meter.get_avgs())\n if isinstance(self.loss_func, LogitMarginL1):\n log_dict[\"alpha\"] = self.loss_func.alpha\n metric = self.evaluator.mean_score()\n log_dict.update(metric)\n # log_dict.update(self.logits_evaluator.mean_score())\n # log_dict.update(self.probs_evaluator.mean_score())\n logger.info(\"{} Epoch[{}]\\t{}\".format(\n phase, epoch + 1, json.dumps(round_dict(log_dict))\n ))\n if self.cfg.wandb.enable:\n wandb_log_dict = {\"epoch\": epoch}\n wandb_log_dict.update(dict(\n (\"{}/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb.log(wandb_log_dict)\n\n def post_temperature(self):\n _, self.val_loader = instantiate(self.cfg.data.object.trainval)\n model_with_temp = ModelWithTemperature(self.model, device=self.device)\n model_with_temp.set_temperature_seg(self.val_loader)\n temp = model_with_temp.get_temperature()\n if self.cfg.wandb.enable:\n wandb.log({\n \"temperature\": temp\n })\n return temp\n \n def log_eval_epoch_info(self, epoch, phase=\"Val\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n log_dict.update(self.loss_meter.get_avgs())\n metric = self.evaluator.mean_score()\n log_dict.update(metric)\n if phase.lower() == \"test\":\n calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)\n log_dict.update(calibrate_metric)\n # log_dict.update(self.logits_evaluator.mean_score())\n # log_dict.update(self.probs_evaluator.mean_score())\n logger.info(\"{} Epoch[{}]\\t{}\".format(\n phase, epoch + 1, json.dumps(round_dict(log_dict))\n ))\n class_table_data = 
self.evaluator.class_score(print=True, return_dataframe=True)\n if phase.lower() == \"test\":\n logger.info(\"\\n\" + AsciiTable(calibrate_table_data).table)\n if self.cfg.wandb.enable:\n wandb_log_dict = {\"epoch\": epoch}\n wandb_log_dict.update(dict(\n (\"{}/{}\".format(phase, key), value) for (key, value) in log_dict.items()\n ))\n wandb_log_dict[\"{}/segment_score_table\".format(phase)] = (\n wandb.Table(\n dataframe=class_table_data\n )\n )\n if phase.lower() == \"test\":\n wandb_log_dict[\"{}/calibrate_score_table\".format(phase)] = (\n wandb.Table(\n columns=calibrate_table_data[0],\n data=calibrate_table_data[1:]\n )\n )\n # if \"test\" in phase.lower() and self.cfg.calibrate.visualize:\n # fig_reliab, fig_hist = self.calibrate_evaluator.plot_reliability_diagram()\n # wandb_log_dict[\"{}/calibrate_reliability\".format(phase)] = fig_reliab\n # wandb_log_dict[\"{}/confidence_histogram\".format(phase)] = fig_hist\n wandb.log(wandb_log_dict)\n\n def train_epoch(self, epoch: int):\n self.reset_meter()\n self.model.train()\n\n max_iter = len(self.train_loader)\n\n end = time.time()\n for i, (inputs, labels) in enumerate(self.train_loader):\n # compute the time for data loading\n self.data_time_meter.update(time.time() - end)\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n outputs = self.model(inputs)\n if isinstance(outputs, Dict):\n outputs = outputs[\"out\"]\n loss = self.loss_func(outputs, labels)\n if isinstance(loss, tuple):\n # For compounding loss, make sure the first term is the overall loss\n loss_total = loss[0]\n else:\n loss_total = loss\n # backward\n self.optimizer.zero_grad()\n loss_total.backward()\n if self.cfg.train.clip_grad_norm:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 2)\n self.optimizer.step()\n # metric\n self.loss_meter.update(loss, inputs.size(0))\n predicts = F.softmax(outputs, dim=1)\n pred_labels = torch.argmax(predicts, dim=1)\n self.evaluator.update(\n pred_labels.detach().cpu().numpy(),\n labels.detach().cpu().numpy()\n )\n # self.logits_evaluator.update(to_numpy(outputs), to_numpy(labels))\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch)\n end = time.time()\n self.log_epoch_info(epoch)\n\n @torch.no_grad()\n def eval_epoch(self, data_loader, epoch, temp=1.0, ts=False, phase=\"Val\"):\n self.reset_meter()\n self.model.eval()\n\n max_iter = len(data_loader)\n end = time.time()\n for i, (inputs, labels) in enumerate(data_loader):\n self.data_time_meter.update(time.time() - end)\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n outputs = self.model(inputs)\n # logits = self.model.forward_logit(inputs)\n if ts:\n outputs = outputs / temp\n \n if isinstance(outputs, Dict):\n outputs = outputs[\"out\"]\n # loss = self.loss_func(outputs, labels)\n # metric\n # self.loss_meter.update(loss)\n predicts = F.softmax(outputs, dim=1)\n pred_labels = torch.argmax(predicts, dim=1)\n self.evaluator.update(\n to_numpy(pred_labels),\n to_numpy(labels)\n )\n if phase.lower() == \"test\":\n self.calibrate_evaluator.update(\n outputs, labels\n )\n # self.logits_evaluator(\n # np.expand_dims(to_numpy(outputs), axis=0),\n # np.expand_dims(to_numpy(labels), axis=0)\n # )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n # logging\n # if (i + 1) % self.cfg.log_period == 0:\n # self.log_iter_info(i, max_iter, epoch, phase)\n end = time.time()\n self.log_eval_epoch_info(epoch, 
phase)\n\n return self.loss_meter.avg(0), self.evaluator.mean_score(main=True)\n\n def test(self):\n logger.info(\"We are almost done : final testing ...\")\n self.test_loader = instantiate(self.cfg.data.object.test)\n # test best pth\n # epoch = self.best_epoch\n # logger.info(\"#################\")\n # logger.info(\" Test at best epoch {}\".format(epoch + 1))\n # logger.info(\"#################\")\n # logger.info(\"Best epoch[{}] :\".format(epoch + 1))\n load_checkpoint(\n osp.join(self.work_dir, \"best.pth\"), self.model, self.device\n )\n self.eval_epoch(self.test_loader, epoch=100, phase=\"Test\")\n if self.cfg.test.post_temperature:\n logger.info(\"Test with post-temperature scaling!\")\n temp = self.post_temperature()\n self.eval_epoch(self.test_loader, epoch=100, phase=\"testPT\", temp=temp, ts=True)\n \n def run(self):\n self.train()\n self.test()"
},
{
"identifier": "NLPTrainer",
"path": "calibrate/engine/nlp_trainer.py",
"snippet": "class NLPTrainer(Trainer):\n def __init__(self, cfg: DictConfig) -> None:\n super().__init__(cfg)\n\n def build_data_loader(self) -> None:\n (\n self.embedding_matrix,\n self.train_datas,\n self.train_labels,\n self.val_datas,\n self.val_labels,\n self.test_datas,\n self.test_labels,\n self.num_words,\n self.embedding_dim\n ) = instantiate(self.cfg.data.object.all)\n self.batch_size = self.cfg.data.batch_size\n self.num_classes = 20\n\n def build_model(self) -> None:\n # embedding\n self.embedding_model = nn.Embedding(self.num_words, self.embedding_dim)\n self.embedding_model.to(self.device)\n self.embedding_model.state_dict()[\"weight\"].copy_(self.embedding_matrix)\n # network\n self.model = instantiate(self.cfg.model.object)\n self.model.to(self.device)\n # loss\n self.loss_func = instantiate(self.cfg.loss.object)\n self.loss_func.to(self.device)\n logger.info(self.loss_func)\n logger.info(\"Model initialized\")\n\n def train_epoch(self, epoch: int):\n self.reset_meter()\n self.model.train()\n self.embedding_model.eval()\n\n perm = np.random.permutation(np.arange(len(self.train_datas)))\n perm_train = np.take(self.train_datas, perm, axis=0)\n perm_labels = np.take(self.train_labels, perm, axis=0)\n max_iter = perm_train.shape[0] // self.batch_size\n\n end = time.time()\n for i in range(max_iter):\n inputs = torch.from_numpy(\n perm_train[i * self.batch_size:(i + 1) * self.batch_size]\n ).type(torch.LongTensor).to(self.device)\n labels = torch.from_numpy(\n np.argmax(perm_labels[i * self.batch_size:(i + 1) * self.batch_size], 1)\n ).to(self.device)\n self.data_time_meter.update(time.time() - end)\n\n with torch.no_grad():\n embs = self.embedding_model(inputs)\n outputs = self.model(embs)\n loss = self.loss_func(outputs, labels)\n if isinstance(loss, tuple):\n loss_total = loss[0]\n else:\n loss_total = loss\n # backward\n self.optimizer.zero_grad()\n loss_total.backward()\n self.optimizer.step()\n # metric\n self.loss_meter.update(loss, inputs.size(0))\n predicts = F.softmax(outputs, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n self.logits_evaluator.update(to_numpy(outputs))\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch)\n end = time.time()\n self.log_epoch_info(epoch)\n\n @torch.no_grad()\n def eval_epoch(\n self, eval_data, eval_labels, epoch,\n phase=\"Val\",\n temp=1,\n post_temp=False\n ):\n self.reset_meter()\n self.model.eval()\n self.embedding_model.eval()\n\n max_iter = math.ceil(eval_data.shape[0] // self.batch_size)\n\n end = time.time()\n for i in range(max_iter):\n inputs = torch.from_numpy(\n eval_data[i * self.batch_size:min((i + 1) * self.batch_size, eval_data.shape[0])]\n ).type(torch.LongTensor).to(self.device)\n labels = torch.from_numpy(\n np.argmax(eval_labels[i * self.batch_size:min((i+1) * self.batch_size, eval_data.shape[0])], 1)\n ).to(self.device)\n embs = self.embedding_model(inputs)\n outputs = self.model(embs)\n if post_temp:\n outputs = outputs / temp\n loss = self.loss_func(outputs, labels)\n # metric\n self.loss_meter.update(loss)\n self.calibrate_evaluator.update(outputs, labels)\n self.logits_evaluator.update(to_numpy(outputs))\n predicts = F.softmax(outputs, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n # logging\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch, 
phase)\n end = time.time()\n self.log_eval_epoch_info(epoch, phase)\n\n return self.loss_meter.avg(0), self.evaluator.mean_score(all_metric=False)[0]\n\n def train(self):\n self.start_or_resume()\n logger.info(\n \"Everything is perfect so far. Let's start training. Good luck!\"\n )\n for epoch in range(self.start_epoch, self.max_epoch):\n logger.info(\"=\" * 20)\n logger.info(\" Start epoch {}\".format(epoch + 1))\n logger.info(\"=\" * 20)\n self.train_epoch(epoch)\n val_loss, val_score = self.eval_epoch(self.val_datas, self.val_labels, epoch, phase=\"Val\")\n # run lr scheduler\n self.scheduler.step()\n if self.best_score is None or val_score > self.best_score:\n self.best_score, self.best_epoch = val_score, epoch\n best_checkpoint = True\n else:\n best_checkpoint = False\n save_checkpoint(\n self.work_dir, self.model, self.optimizer, self.scheduler,\n epoch=epoch,\n best_checkpoint=best_checkpoint,\n val_score=val_score,\n keep_checkpoint_num=self.cfg.train.keep_checkpoint_num\n )\n # logging best performance on val so far\n logger.info(\n \"Epoch[{}]\\tBest {} on Val : {:.4f} at epoch {}\".format(\n epoch + 1, self.evaluator.main_metric(),\n self.best_score, self.best_epoch + 1\n )\n )\n if self.cfg.wandb.enable and best_checkpoint:\n wandb.log({\n \"epoch\": epoch,\n \"Val/best_epoch\": self.best_epoch,\n \"Val/best_{}\".format(self.evaluator.main_metric()): self.best_score,\n \"Val/best_classify_score_table\": self.evaluator.wandb_score_table(),\n \"Val/best_calibrate_score_table\": self.calibrate_evaluator.wandb_score_table()\n })\n if self.cfg.wandb.enable:\n copyfile(\n osp.join(self.work_dir, \"best.pth\"),\n osp.join(self.work_dir, \"{}-best.pth\".format(wandb.run.name))\n )\n\n def post_temperature(self):\n model_with_temp = ModelWithTemperature(self.model, device=self.device)\n model_with_temp.set_temperature_ng(\n self.embedding_model, self.val_datas, self.val_labels,\n batch_size=self.batch_size\n )\n temp = model_with_temp.get_temperature()\n wandb.log({\n \"temperature\": temp\n })\n return temp\n\n def test(self):\n logger.info(\"We are almost done : final testing ...\")\n # test best pth\n epoch = self.best_epoch\n logger.info(\"#################\")\n logger.info(\" Test at best epoch {}\".format(epoch + 1))\n logger.info(\"#################\")\n logger.info(\"Best epoch[{}] :\".format(epoch + 1))\n load_checkpoint(\n osp.join(self.work_dir, \"best.pth\"), self.model, self.device\n )\n self.eval_epoch(self.test_datas, self.test_labels, epoch, phase=\"Test\")\n temp = self.post_temperature()\n self.eval_epoch(self.test_datas, self.test_labels, epoch, phase=\"TestPT\", temp=temp, post_temp=True)"
},
{
"identifier": "set_random_seed",
"path": "calibrate/utils/misc.py",
"snippet": "def set_random_seed(seed: int = None, deterministic: bool = False):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
}
] | import os
import sys
import logging
import hydra
from omegaconf import DictConfig, OmegaConf
from omegaconf.omegaconf import open_dict
from calibrate.engine import Trainer, SegmentTrainer, NLPTrainer
from calibrate.utils import set_random_seed | 8,720 |
logger = logging.getLogger(__name__)
TRAINERS = {
"cv": Trainer,
"segment": SegmentTrainer,
"nlp": NLPTrainer,
}
@hydra.main(config_path="../configs", config_name="defaults")
def main(cfg: DictConfig):
logger.info("Launch command : ")
logger.info(" ".join(sys.argv))
with open_dict(cfg):
cfg.work_dir = os.getcwd()
logger.info("\n" + OmegaConf.to_yaml(cfg))
|
logger = logging.getLogger(__name__)
TRAINERS = {
"cv": Trainer,
"segment": SegmentTrainer,
"nlp": NLPTrainer,
}
@hydra.main(config_path="../configs", config_name="defaults")
def main(cfg: DictConfig):
logger.info("Launch command : ")
logger.info(" ".join(sys.argv))
with open_dict(cfg):
cfg.work_dir = os.getcwd()
logger.info("\n" + OmegaConf.to_yaml(cfg))
| set_random_seed( | 3 | 2023-12-17 13:53:18+00:00 | 12k |
daihaojun554/biliscrapy | biliscrapy/views.py | [
{
"identifier": "BiliDanmu",
"path": "biliscrapy/models.py",
"snippet": "class BiliDanmu(models.Model):\n _id = models.CharField(max_length=255)\n cid = models.CharField(max_length=255)\n content = models.TextField()\n color = models.CharField(max_length=255)\n fontsize = models.IntegerField()\n midHash = models.CharField(max_length=255)\n mode = models.CharField(max_length=255)\n progress = models.FloatField()\n ctime = models.DateTimeField()\n\n def __str__(self):\n return self.content"
},
{
"identifier": "BiliComment",
"path": "biliscrapy/models.py",
"snippet": "class BiliComment(models.Model):\n avid = models.CharField(max_length=255)\n uname = models.CharField(max_length=255)\n # 最高等级就是6级\n current_level = models.IntegerField()\n # 用户等级\n like = models.IntegerField()\n # 用户性别 男 女 保密\n sex = models.CharField(max_length=10)\n ctime = models.DateTimeField()\n message = models.TextField()\n\n def __str__(self):\n return self.message"
},
{
"identifier": "BiliVideo",
"path": "biliscrapy/models.py",
"snippet": "class BiliVideo(models.Model):\n bvid = models.CharField(max_length=30, unique=True)\n avid = models.IntegerField(unique=True)\n oid = models.IntegerField(unique=True)\n title = models.CharField(max_length=100)\n author = models.CharField(max_length=100)\n tag = models.CharField(max_length=100)\n pubdate = models.DateField()\n pic = models.URLField()\n desc = models.TextField()\n danmu_fetched = models.BooleanField(default=False)\n comment_fetched = models.BooleanField(default=False)\n danmaku_count = models.IntegerField(default=0)\n comment_count = models.IntegerField(default=0)\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Card",
"path": "biliscrapy/models.py",
"snippet": "class Card(models.Model):\n card_code = models.CharField(max_length=100, unique=True)\n expiration_date = models.DateTimeField()\n last_used_address = models.GenericIPAddressField(null=True, blank=True)\n is_used = models.BooleanField(default=False)\n # action = models.CharField(max_length=100)\n # is_active = models.BooleanField(default=True)\n # is_expired = models.BooleanField(default=False)\n # count = models.IntegerField(default=0)\n\n def __str__(self):\n return self.card_code"
},
{
"identifier": "Comments",
"path": "biliscrapy/network/bilibili_comment.py",
"snippet": "class Comments:\n def __init__(self):\n script_dir = os.path.dirname(os.path.abspath(__file__))\n # 构建文件路径\n file_path = os.path.join(script_dir, 'bilibili_cookies.json')\n if not file_path:\n self.cookies = {}\n with open(file_path, 'r', encoding='utf-8') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n self.utils = bili_utils()\n self.logger = logging.getLogger('log')\n\n def extract_comments(self, replies):\n extracted_comments = []\n if not replies:\n return extracted_comments\n for reply in replies:\n extracted_comment = {\n 'uname': reply['member']['uname'],\n 'current_level': reply['member']['level_info']['current_level'],\n 'like': reply['like'],\n 'sex': reply['member']['sex'],\n 'ctime': reply['ctime'],\n 'message': reply['content']['message']\n }\n extracted_comments.append(extracted_comment)\n\n if 'replies' in reply and reply['replies']:\n nested_replies = self.extract_comments(reply['replies'])\n extracted_comments.extend(nested_replies)\n\n return extracted_comments\n\n def get_comments(self, bvorurl):\n self.logger.info(\"Getting comments for bvorurl:{}\".format(bvorurl))\n bv = self.utils.bv_get(bvorurl)\n avid = self.utils.bv2av(bv)\n count = 1\n while avid is None:\n avid = self.utils.bv2av(bv)\n count += 1\n self.logger.info(f\"avid is None, retrying...count is {count}\")\n time.sleep(3)\n self.logger.info(f\"avid===>{avid}\")\n comments = [] # 使用列表存储评论\n\n # 获取评论总数和每页评论数量\n # 计算总页数\n page_num = 1\n page_size = 20\n\n while True:\n url = f'https://api.bilibili.com/x/v2/reply?type=1&oid={avid}&sort=2&pn={page_num}&ps={page_size}'\n response = requests.get(url, headers=headers, cookies=self.cookies)\n data = response.json()\n if data['code'] != 0:\n break\n # 提取回复信息\n extracted_data = self.extract_comments(data['data']['replies'])\n\n # 过滤重复的评论\n new_comments = [comment for comment in extracted_data if comment not in comments]\n comments.extend(new_comments) # 将新的评论添加到列表中\n self.logger.info(f\"提取到了{len(new_comments)}条评论,从第 {page_num} 页\")\n if len(new_comments) == 0:\n self.logger.info(\"提取完毕所有评论,共提取到{}条评论!=====>avid{}\".format(len(comments), avid))\n break\n # 判断是否有下一页\n total_count = data['data']['page']['count']\n total_pages = (total_count + page_size - 1) // page_size # 计算总页数\n if page_num >= total_pages:\n self.logger.info(\"提取完毕所有评论,共提取到{}条评论!=====>avid{}\".format(len(comments), avid))\n break\n\n # 构建下一页的URL\n page_num += 1\n self.logger.info(\"开始提取第{}页评论\".format(page_num))\n time.sleep(random.uniform(0.5, 1.5))\n self.logger.info(f\"总共{len(comments)}条评论!\")\n\n # 写入JSON文件\n os.makedirs(\"./data/comment/\", exist_ok=True) # 创建多层目录\n file_path = f'./data/comment/{avid}_{page_num}-{page_size}_{len(comments)}.json'\n if len(comments) < 2000:\n with open(file_path, 'w', encoding='utf-8') as f:\n json.dump(comments, f, indent=4, ensure_ascii=False)\n return comments"
},
{
"identifier": "bili_utils",
"path": "biliscrapy/network/bilibili_utils.py",
"snippet": "class bili_utils:\n def __init__(self):\n self.logger = logging.getLogger('log')\n self.header = headers\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')\n with open(file_path, 'r') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n\n def bv_get(self, bvorurl):\n # https://api.bilibili.com/x/web-interface/view?bvid=BV1uG41197Tf\n # 将bv提取出来\n bv_identifier = \"BV\" # BV号的标识符\n if \"http://\" in bvorurl or \"https://\" in bvorurl: # 检查是否是一个URL\n self.logger.info(\"你输入的是http链接,正在解析...\")\n bv_index = bvorurl.find(bv_identifier)\n if bv_index != -1: # 如果找到了BV号\n bv = bvorurl[bv_index:bv_index + len(bv_identifier) + 10] # 提取BV号\n self.logger.info(f\"BV号为......: {bv}\")\n return bv\n else:\n self.logger.info(\"你输入的链接地址有误!\")\n return\n elif bv_identifier in bvorurl: # 如果输入的是BV号\n self.logger.info(f\"你输入的是BV号{bvorurl},正在解析...\")\n bv = bvorurl\n return bv\n else:\n self.logger.info(f\"请输入正确的链接地址或BV号!,{bvorurl}\")\n return \"BV1111111111\"\n\n '''\n av 就是 oid 评论里面的参数\n '''\n\n def bv2av(self, bv):\n bv2av_url = 'https://api.bilibili.com/x/web-interface/view?bvid='\n if bv.startswith(\"BV\"):\n url = bv2av_url + str(bv)\n retry_count = 0\n max_retries = 10\n retry_delay = 1 # seconds\n while retry_count < max_retries:\n try:\n response = requests.get(url,headers=headers,cookies=self.cookies)\n response.raise_for_status() # 检查请求是否成功\n data = response.json()\n # self.logger.info(data)\n if 'data' in data and 'aid' in data['data']:\n avid = data['data']['aid']\n self.logger.info(f\"找到的avid{avid}\")\n return avid\n else:\n self.logger.info(\"未找到有效的aid值,正在重新尝试获取...\")\n retry_count += 1\n time.sleep(retry_delay)\n except (requests.RequestException, ValueError) as e:\n self.logger.info(f\"请求发生错误:{e}\")\n retry_count += 1\n self.logger.info(\"服务器返回错误!请稍后再试!\")\n self.logger.info(f\"正在重新尝试获取aid,尝试次数==>{retry_count}\")\n time.sleep(retry_delay)\n\n return None\n\n '''\n cid 是弹幕用的参数\n '''\n\n def bv2cid(self, bv):\n url = f\"https://api.bilibili.com/x/player/pagelist?bvid={str(bv)}&jsonp=jsonp\"\n retry_count = 1\n json_s = requests.get(url,headers=headers,cookies=self.cookies).json()\n self.logger.info(\"bv====》\"+bv)\n if json_s['code'] == 0:\n cid = json_s['data'][0]['cid']\n self.logger.info(\"提取出来的cid是:\" + str(cid))\n return cid\n else:\n self.logger.error(\"服务器返回错误!请稍后再试!\")\n retry_count+=1\n if retry_count > 10:\n self.logger.error(\"尝试次数过多,请稍后再试!\")\n return None\n else:\n self.logger.error(\"正在重新尝试获取cid,尝试次数==>\" + str(retry_count))\n return self.bv2cid(bv)\n\n def get_bilibili_cookies(self):\n options = webdriver.ChromeOptions()\n # options.add_argument('--headless')\n # options.add_argument('--disable-gpu')\n # 动态获取路径 不用每次都手动输入路径\n # chromedriver.exe 的路径\n # 获取当前脚本的绝对路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n\n # 构建 chromedriver 的绝对路径\n driver_path = os.path.join(current_path, 'chromedriver.exe')\n\n # 创建 WebDriver 服务\n service = Service(driver_path)\n # service = Service('./chromedriver.exe')\n options.add_argument('--no-sandbox')\n options.binary_location='C:\\\\Program Files\\\\Google\\\\chrome-win64\\\\chrome.exe'\n driver = webdriver.Chrome(options=options, service=service)\n\n # 打开 Bilibili 网站\n driver.get('https://www.bilibili.com/')\n #\n login_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\n '#i_cecream > div.bili-feed4 > div.bili-header.large-header > 
div.bili-header__bar > ul.right-entry > li:nth-child(1) > li > div.right-entry__outside.go-login-btn')))\n login_btn.click()\n # 等待登录完成成\n time.sleep(10)\n driver.get('https://www.bilibili.com/')\n # 在这里,模拟登录流程(需要输入账号和密码)\n # 扫码登录然后,等待完成,完成的条件是屏幕上出现了某个\n\n search = WebDriverWait(driver, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#nav-searchform > div.nav-search-btn')))\n search.click()\n time.sleep(3)\n cookies = driver.get_cookies()\n # 获取当前脚本的路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(current_path, 'bilibili_cookies.json'), 'w') as f:\n # 写入当前文件\n f.write(json.dumps(cookies))\n # 写入成功\n self.logger.info('写入成功{}'.format(cookies))\n driver.quit()\n return\n\n def get_info_by_bv(self, bv):\n url = f\"https://api.bilibili.com/x/web-interface/view?bvid={str(bv)}\"\n\n def try_get(url):\n try:\n response = requests.get(url, headers=self.header, cookies=self.cookies)\n js_str = response.json()\n if js_str.get('code', 0) == 0:\n return js_str['data']\n else:\n # 可能需要根据API的设计,记录不同的错误\n self.logger.error(\n f\"Video API returned non-success code: {js_str.get('code', 'Unknown')} with message: {js_str.get('msg', 'Unknown')}\")\n except requests.exceptions.RequestException as e:\n self.logger.error(f\"An error occurred: {e}\")\n return None\n\n result = None\n retry_count = 10\n for _ in range(retry_count):\n result = try_get(url)\n if result:\n break\n\n return result\n\n # 检查url是否合法\n def check_url(self, url):\n if url.startswith(\"BV\"):\n return True\n elif url.startswith(\"https://www.bilibili.com/\"):\n return True\n else:\n return False"
},
{
"identifier": "Video",
"path": "biliscrapy/network/bilibili_video.py",
"snippet": "class Video:\n def __init__(self):\n script_path = os.path.dirname(os.path.abspath(__file__))\n self.dir_path = os.path.join(script_path, 'data', 'video')\n os.makedirs(self.dir_path, exist_ok=True)\n self.utils = bili_utils()\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n # 构建文件路径\n file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')\n if not file_path:\n self.cookies = {}\n with open(file_path, 'r') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n self.headers = headers\n self.logger = logging.getLogger('log')\n\n def get_video_info(self, url: str) -> str:\n \"\"\"\n 从给定的URL中提取视频信息。\n :param url: 要获取信息的视频的URL。\n :return: 返回包含视频信息的JSON字符串,如果URL无效,则返回字符串'invalid url'。\n \"\"\"\n try:\n isValid = self.utils.check_url(url)\n if not isValid:\n return 'url is invalid'\n resp = requests.get(url, headers=self.headers, cookies=self.cookies)\n cont = re.compile(r\".*?window.__playinfo__=(?P<info1>.*?);\\(function\\(\\)\", re.S)\n a = cont.search(resp.text, re.S)\n info = a.group('info1').replace(\"</script><script>window.__INITIAL_STATE__=\", ',')\n return f\"[{info}]\"\n except requests.RequestException as e:\n self.logger.error(\"Error occurred while getting video info: {}\".format(str(e)))\n return ''\n\n def download_file(self, url, filename):\n \"\"\"\n 下载文件的函数\n\n 参数:\n url (str): 要下载的文件的URL\n filename (str): 下载的文件保存的路径和文件名\n \"\"\"\n try:\n response = requests.get(url, headers=self.headers, stream=True, cookies=self.cookies)\n total_size = int(response.headers.get('Content-Length', 0))\n block_size = 1024\n progress_bar = tqdm(total=total_size, unit='B', unit_scale=True)\n with open(os.path.join(self.dir_path, filename), 'wb') as file:\n for data in response.iter_content(block_size):\n file.write(data)\n progress_bar.update(len(data))\n progress_bar.close()\n self.logger.info(\"Downloading file.{}\".format(filename))\n except requests.exceptions.RequestException as e:\n self.logger.error(\"Error occurred while downloading the file: {}\".format(str(e)))\n\n def merge_video_audio(self, video_file, audio_file):\n \"\"\"\n 合并视频和音频文件。\n\n 参数:\n self: 类自身引用。\n video_file: 视频文件路径。\n audio_file: 音频文件路径。\n 返回值:\n 无\n 异常:\n 如果视频文件或音频文件不存在,则会打印错误消息并返回。\n 注意:\n 合并后的文件以视频文件的基础名称和 '.mp4' 扩展名的形式保存。\n 原始视频和音频文件在合并成功后会被删除。\n \"\"\"\n if not os.path.isfile(os.path.join(self.dir_path, video_file)):\n print(f\"Error: {video_file} 不是文件或不存在。\")\n return\n if not os.path.isfile(os.path.join(self.dir_path, audio_file)):\n print(f\"Error: {audio_file} 不是文件或不存在。\")\n return\n\n # 合并视频和音频文件\n # 使用ffmpeg命令行工具将视频和音频文件合并为mp4格式文件\n cmd = f\"ffmpeg -i {os.path.join(self.dir_path, video_file)} -i {os.path.join(self.dir_path, audio_file)} -c:v copy -c:a aac -strict experimental {os.path.join(self.dir_path, video_file.replace('.flv', ''))}.mp4\"\n self.logger.info(cmd)\n try:\n os.system(cmd)\n except Exception as e:\n print(f\"运行 ffmpeg 时发生错误: {e}\")\n return\n\n # 检查合并后的文件是否成功创建\n output_file = os.path.splitext(os.path.basename(video_file))[0] + '.mp4'\n if not os.path.isfile(os.path.join(self.dir_path, output_file)):\n print(\"文件合并失败。\")\n return\n\n # 删除原始视频和音频文件\n os.remove(os.path.join(self.dir_path, video_file))\n os.remove(os.path.join(self.dir_path, audio_file))\n self.logger.info(f\"成功合并视频和音频,------->{output_file}\")"
}
] | import time
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.utils.timezone import make_aware
from .models import BiliDanmu, BiliComment, BiliVideo, Card
from .network.bilibili_danmu import *
from .network.bilibili_comment import Comments
from .network.bilibili_utils import bili_utils
from .network.bilibili_video import Video
from django.utils import timezone
from django.http import JsonResponse, HttpResponse | 7,471 | 'total': paginator.count,
'data': page_obj,
"new_request": not comments_exist,
}
return render(request, 'comment.html', context)
return render(request, 'comment.html')
def reflash_cookie(request):
"""
    Refresh the Bilibili cookies.
:param request:
:return:
"""
utils.get_bilibili_cookies()
return render(request, 'danmaku.html')
def generate_chart(request):
keyword = request.POST.get("keyword")
print(keyword)
"""
    Generate charts.
:param request:
:return:
"""
context = {
'message': 'fail',
'data': [],
'code': -1,
}
videos = BiliVideo.objects.all().values().order_by('pubdate')
    # Pagination: show 6 videos per page
paginator = Paginator(videos, 6)
page_number = request.GET.get('page', 1)
page_obj = paginator.get_page(page_number)
if videos:
context['message'] = 'success'
context['data'] = page_obj
context['code'] = 0
return render(request, 'generate_chart.html', context)
def download_video(request):
context = {}
if request.method == 'POST':
bvid = request.POST.get('bvid')
print(bvid)
if not utils.check_url(bvid):
context['message'] = 'url 不合法!'
context['code'] = -1
return render(request, 'download_video.html', context)
url = base_url + bvid
info = bili_video.get_video_info(url)
if not info:
context['message'] = '获取视频信息失败!'
context['code'] = -1
data = json.loads(info)
video_name = data[1]['videoData']['title']
v_urls = [i['baseUrl'] for i in data[0]['data']['dash']['video']]
a_urls = [i['baseUrl'] for i in data[0]['data']['dash']['audio']]
print(v_urls[0], a_urls[0])
v_suffix = 'flv'
a_suffix = 'mp3'
        # Skip merging if the merged file already exists
if not os.path.exists(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"network",
"data",
"video",
f"{video_name}.mp4")):
logger.info(f"开始合并视频和音频")
bili_video.download_file(v_urls[0], f'{video_name}.{v_suffix}')
bili_video.download_file(a_urls[0], f'{video_name}.{a_suffix}')
bili_video.merge_video_audio(f"{video_name}.{v_suffix}", f"{video_name}.{a_suffix}")
        # Return the video stream data to the front end
logger.info(f"视频数据已存在!")
with open(
f'{os.path.join(os.path.dirname(os.path.abspath(__file__)), "network", "data", "video", f"{video_name}.mp4")}',
'rb') as f:
response = HttpResponse(f.read(), content_type='video/mp4')
response['Content-Disposition'] = 'attachment; filename="{}"'.format(f"{video_name}.mp4")
return response
return render(request, 'download_video.html', context)
def parse_video(request):
context = {
'message': 'success',
'data': [],
'code': 0
}
if request.method == 'POST':
url = request.POST.get("_bv")
if not utils.check_url(url):
context['message'] = 'url 不合法!'
context['code'] = -1
return render(request, 'download_video.html', context)
logger.info(url)
bv = utils.bv_get(url)
logger.info(f"bv,--->{bv}")
info = utils.get_info_by_bv(bv)
if info is None:
context.update({"message": "fail", "code": -1})
return render(request, 'download_video.html', context)
context.update({"data": info})
return render(request, 'download_video.html', context)
def enter_card(request):
if request.method == 'POST':
card_code = request.POST.get('card_code')
current_datetime = timezone.now()
try:
|
# Create your views here.
utils = bili_utils()
bili_video = Video()
logger = logging.getLogger('log')
base_url = 'https://www.bilibili.com/video/'
def danmaku(request):
if request.method == 'POST':
        bv = request.POST.get('bv') # get the BV id or URL entered by the user
bvid = utils.bv_get(bv)
url = bv
context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!'
}
if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith("BV") or bv.startswith("bv"):
danmu = Danmu()
vv = BiliVideo.objects.filter(bvid=bvid).values()
cid = vv[0]['oid'] if vv else danmu.bv2cid(bv)
bvid_exists = BiliDanmu.objects.filter(cid=cid).exists()
if not bvid_exists:
logger.info("bvid_exists,不存在!!!")
                dates = danmu.get_available_dates(cid) # get the list of all available dates for the video
                danmu.down_so_files(cid, dates) # download all danmaku files
                unique_danmakus = danmu.parse_so_to_json(cid, dates) # parse and save them as JSON
if unique_danmakus is None:
return render(request, 'danmaku.html',
context.update({'message': '解析弹幕失败,请检查BV号是否正确!'}))
danmu_objects = [
BiliDanmu(
_id=danmaku['_id'],
cid=cid,
content=danmaku['content'],
color=danmaku['color'],
fontsize=danmaku['fontsize'],
midHash=danmaku['midHash'],
mode=danmaku['mode'],
progress=danmaku['progress'],
ctime=make_aware(datetime.fromtimestamp(danmaku['ctime']))
)
for danmaku in unique_danmakus
]
BiliDanmu.objects.bulk_create(danmu_objects)
            # no danmaku records existed yet
danmaku_count = BiliDanmu.objects.filter(cid=cid).count()
print(danmaku_count)
try:
logger.info("try.....")
                # try to update the video's danmaku fetch status
logger.info(bvid)
video = BiliVideo.objects.get(bvid=bvid)
video.danmu_fetched = True
video.danmaku_count = danmaku_count
video.save()
except Exception as e:
logger.error("error~~~~~~~~~")
logger.error(e)
                # if the video record does not exist, create a new one
info = utils.get_info_by_bv(bvid)
logger.info("info---->{}".format(info))
if info is None:
return render(request, 'danmaku.html', context)
cid = utils.bv2cid(bvid)
logger.info(f'{cid}, cid')
video = BiliVideo(bvid=bvid,
avid=info['aid'],
oid=cid,
title=info['title'],
author=info['owner']['name'],
tag=info['tname'],
pubdate=make_aware(datetime.fromtimestamp(info['pubdate'])),
pic=info['pic'],
desc=info['desc'],
danmu_fetched=True,
danmaku_count=danmaku_count
) # 设置弹幕抓取状态
video.save()
logger.info("新视频信息已添加")
        # query the database and return the results
danmakus = BiliDanmu.objects.filter(cid=cid).values().order_by('ctime')
paginator = Paginator(danmakus, 15) # 每页显示10条记录
page_number = request.POST.get('page') if request.POST.get('page') else 1 # 获取页码参数
page_obj = paginator.get_page(page_number) # 获取对应页码的数据
print(paginator.count)
context = {
"url": url,
'result': 'error',
'bvid': bv,
'total': paginator.count,
'data': page_obj,
'new_request': not bvid_exists,
}
if len(danmakus) > 0:
context['result'] = 'success'
return render(request, 'danmaku.html', context)
return render(request, 'danmaku.html')
def comment(request):
if request.method == 'POST':
        bv = request.POST.get('bv') # get the BV id or URL entered by the user
url = bv
context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!',
'cid': ''
}
c = Comments()
bv_ = utils.bv_get(bv) if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith(
"BV") or bv.startswith("bv") else bv
logger.info(f'bv_====>{bv_}')
vv = BiliVideo.objects.filter(bvid=bv_).values()
# logger.info(vv[0]['avid'], 'sadjkaskjadssajasjdsjkaaashhakads')
av = utils.bv2av(bv_)
av_count = 1
while av is None:
logger.info(f"av is None, retrying...{av_count}")
av_count += 1
av = utils.bv2av(bv_)
avid = vv[0]['avid'] if vv else av
logger.info(f"avid=====>{avid}")
if avid is None:
context = {
'result': 'error',
'data': [],
'message': 'b站服务器返回错误,请重新尝试'
}
return render(request, 'comment.html', context)
comments_exist = BiliComment.objects.filter(avid=avid).exists()
if not comments_exist:
comments = c.get_comments(bv)
comment_obj = [BiliComment(
avid=avid,
uname=cmt['uname'],
current_level=cmt['current_level'],
like=cmt['like'],
sex=cmt['sex'],
ctime=make_aware(datetime.fromtimestamp(cmt['ctime'])),
message=cmt['message']
) for cmt in comments]
BiliComment.objects.bulk_create(comment_obj)
bili_comment_count = BiliComment.objects.filter(avid=avid).count()
try:
            # try to update the video's danmaku fetch status
video = BiliVideo.objects.get(avid=avid)
video.comment_fetched = True
video.comment_count = bili_comment_count
video.save()
except BiliVideo.DoesNotExist:
            # if the video record does not exist, create a new one
info = utils.get_info_by_bv(bv_)
if info is None:
return render(request, 'comment.html', context)
cid = utils.bv2cid(bv_)
            # keep retrying until a valid cid is obtained
cid_count = 1
while cid is None:
cid = utils.bv2cid(bv_)
logger.info(f'{cid}, cid,尝试了{cid_count}次')
cid_count += 1
time.sleep(3)
video = BiliVideo(avid=avid,
bvid=bv_,
oid=cid,
title=info['title'],
author=info['owner']['name'],
tag=info['tname'],
pubdate=make_aware(datetime.fromtimestamp(info['pubdate'])),
pic=info['pic'],
desc=info['desc'],
comment_fetched=True,
comment_count=bili_comment_count
) # 设置弹幕抓取状态
video.save()
comments = BiliComment.objects.filter(avid=avid).values().order_by('ctime')
paginator = Paginator(comments, 15)
page_number = request.POST.get('page', 1)
page_obj = paginator.get_page(page_number)
context = {
"url": url,
'result': 'success',
'bvid': bv,
'total': paginator.count,
'data': page_obj,
"new_request": not comments_exist,
}
return render(request, 'comment.html', context)
return render(request, 'comment.html')
def reflash_cookie(request):
"""
    Refresh the Bilibili cookies.
:param request:
:return:
"""
utils.get_bilibili_cookies()
return render(request, 'danmaku.html')
def generate_chart(request):
keyword = request.POST.get("keyword")
print(keyword)
"""
    Generate charts.
:param request:
:return:
"""
context = {
'message': 'fail',
'data': [],
'code': -1,
}
videos = BiliVideo.objects.all().values().order_by('pubdate')
    # Pagination: show 6 videos per page
paginator = Paginator(videos, 6)
page_number = request.GET.get('page', 1)
page_obj = paginator.get_page(page_number)
if videos:
context['message'] = 'success'
context['data'] = page_obj
context['code'] = 0
return render(request, 'generate_chart.html', context)
def download_video(request):
context = {}
if request.method == 'POST':
bvid = request.POST.get('bvid')
print(bvid)
if not utils.check_url(bvid):
context['message'] = 'url 不合法!'
context['code'] = -1
return render(request, 'download_video.html', context)
url = base_url + bvid
info = bili_video.get_video_info(url)
if not info:
context['message'] = '获取视频信息失败!'
context['code'] = -1
data = json.loads(info)
video_name = data[1]['videoData']['title']
v_urls = [i['baseUrl'] for i in data[0]['data']['dash']['video']]
a_urls = [i['baseUrl'] for i in data[0]['data']['dash']['audio']]
print(v_urls[0], a_urls[0])
v_suffix = 'flv'
a_suffix = 'mp3'
        # Skip merging if the merged file already exists
if not os.path.exists(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"network",
"data",
"video",
f"{video_name}.mp4")):
logger.info(f"开始合并视频和音频")
bili_video.download_file(v_urls[0], f'{video_name}.{v_suffix}')
bili_video.download_file(a_urls[0], f'{video_name}.{a_suffix}')
bili_video.merge_video_audio(f"{video_name}.{v_suffix}", f"{video_name}.{a_suffix}")
        # Return the video stream data to the front end
logger.info(f"视频数据已存在!")
with open(
f'{os.path.join(os.path.dirname(os.path.abspath(__file__)), "network", "data", "video", f"{video_name}.mp4")}',
'rb') as f:
response = HttpResponse(f.read(), content_type='video/mp4')
response['Content-Disposition'] = 'attachment; filename="{}"'.format(f"{video_name}.mp4")
return response
return render(request, 'download_video.html', context)
def parse_video(request):
context = {
'message': 'success',
'data': [],
'code': 0
}
if request.method == 'POST':
url = request.POST.get("_bv")
if not utils.check_url(url):
context['message'] = 'url 不合法!'
context['code'] = -1
return render(request, 'download_video.html', context)
logger.info(url)
bv = utils.bv_get(url)
logger.info(f"bv,--->{bv}")
info = utils.get_info_by_bv(bv)
if info is None:
context.update({"message": "fail", "code": -1})
return render(request, 'download_video.html', context)
context.update({"data": info})
return render(request, 'download_video.html', context)
def enter_card(request):
if request.method == 'POST':
card_code = request.POST.get('card_code')
current_datetime = timezone.now()
try: | card = Card.objects.get(card_code=card_code) | 3 | 2023-12-14 10:14:24+00:00 | 12k |
mjavadpur/Sadtalker_LongVideos | src/facerender/animate.py | [
{
"identifier": "HEEstimator",
"path": "src/facerender/modules/keypoint_detector.py",
"snippet": "class HEEstimator(nn.Module):\n \"\"\"\n Estimating head pose and expression.\n \"\"\"\n\n def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, num_bins=66, estimate_jacobian=True):\n super(HEEstimator, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=image_channel, out_channels=block_expansion, kernel_size=7, padding=3, stride=2)\n self.norm1 = BatchNorm2d(block_expansion, affine=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.conv2 = nn.Conv2d(in_channels=block_expansion, out_channels=256, kernel_size=1)\n self.norm2 = BatchNorm2d(256, affine=True)\n\n self.block1 = nn.Sequential()\n for i in range(3):\n self.block1.add_module('b1_'+ str(i), ResBottleneck(in_features=256, stride=1))\n\n self.conv3 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1)\n self.norm3 = BatchNorm2d(512, affine=True)\n self.block2 = ResBottleneck(in_features=512, stride=2)\n\n self.block3 = nn.Sequential()\n for i in range(3):\n self.block3.add_module('b3_'+ str(i), ResBottleneck(in_features=512, stride=1))\n\n self.conv4 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1)\n self.norm4 = BatchNorm2d(1024, affine=True)\n self.block4 = ResBottleneck(in_features=1024, stride=2)\n\n self.block5 = nn.Sequential()\n for i in range(5):\n self.block5.add_module('b5_'+ str(i), ResBottleneck(in_features=1024, stride=1))\n\n self.conv5 = nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=1)\n self.norm5 = BatchNorm2d(2048, affine=True)\n self.block6 = ResBottleneck(in_features=2048, stride=2)\n\n self.block7 = nn.Sequential()\n for i in range(2):\n self.block7.add_module('b7_'+ str(i), ResBottleneck(in_features=2048, stride=1))\n\n self.fc_roll = nn.Linear(2048, num_bins)\n self.fc_pitch = nn.Linear(2048, num_bins)\n self.fc_yaw = nn.Linear(2048, num_bins)\n\n self.fc_t = nn.Linear(2048, 3)\n\n self.fc_exp = nn.Linear(2048, 3*num_kp)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.norm1(out)\n out = F.relu(out)\n out = self.maxpool(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = F.relu(out)\n\n out = self.block1(out)\n\n out = self.conv3(out)\n out = self.norm3(out)\n out = F.relu(out)\n out = self.block2(out)\n\n out = self.block3(out)\n\n out = self.conv4(out)\n out = self.norm4(out)\n out = F.relu(out)\n out = self.block4(out)\n\n out = self.block5(out)\n\n out = self.conv5(out)\n out = self.norm5(out)\n out = F.relu(out)\n out = self.block6(out)\n\n out = self.block7(out)\n\n out = F.adaptive_avg_pool2d(out, 1)\n out = out.view(out.shape[0], -1)\n\n yaw = self.fc_roll(out)\n pitch = self.fc_pitch(out)\n roll = self.fc_yaw(out)\n t = self.fc_t(out)\n exp = self.fc_exp(out)\n\n return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp}"
},
{
"identifier": "KPDetector",
"path": "src/facerender/modules/keypoint_detector.py",
"snippet": "class KPDetector(nn.Module):\n \"\"\"\n Detecting canonical keypoints. Return keypoint position and jacobian near each keypoint.\n \"\"\"\n\n def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, reshape_channel, reshape_depth,\n num_blocks, temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False):\n super(KPDetector, self).__init__()\n\n self.predictor = KPHourglass(block_expansion, in_features=image_channel,\n max_features=max_features, reshape_features=reshape_channel, reshape_depth=reshape_depth, num_blocks=num_blocks)\n\n # self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=7, padding=3)\n self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=3, padding=1)\n\n if estimate_jacobian:\n self.num_jacobian_maps = 1 if single_jacobian_map else num_kp\n # self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=7, padding=3)\n self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=3, padding=1)\n '''\n initial as:\n [[1 0 0]\n [0 1 0]\n [0 0 1]]\n '''\n self.jacobian.weight.data.zero_()\n self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))\n else:\n self.jacobian = None\n\n self.temperature = temperature\n self.scale_factor = scale_factor\n if self.scale_factor != 1:\n self.down = AntiAliasInterpolation2d(image_channel, self.scale_factor)\n\n def gaussian2kp(self, heatmap):\n \"\"\"\n Extract the mean from a heatmap\n \"\"\"\n shape = heatmap.shape\n heatmap = heatmap.unsqueeze(-1)\n grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)\n value = (heatmap * grid).sum(dim=(2, 3, 4))\n kp = {'value': value}\n\n return kp\n\n def forward(self, x):\n if self.scale_factor != 1:\n x = self.down(x)\n\n feature_map = self.predictor(x)\n prediction = self.kp(feature_map)\n\n final_shape = prediction.shape\n heatmap = prediction.view(final_shape[0], final_shape[1], -1)\n heatmap = F.softmax(heatmap / self.temperature, dim=2)\n heatmap = heatmap.view(*final_shape)\n\n out = self.gaussian2kp(heatmap)\n\n if self.jacobian is not None:\n jacobian_map = self.jacobian(feature_map)\n jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 9, final_shape[2],\n final_shape[3], final_shape[4])\n heatmap = heatmap.unsqueeze(2)\n\n jacobian = heatmap * jacobian_map\n jacobian = jacobian.view(final_shape[0], final_shape[1], 9, -1)\n jacobian = jacobian.sum(dim=-1)\n jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 3, 3)\n out['jacobian'] = jacobian\n\n return out"
},
{
"identifier": "MappingNet",
"path": "src/facerender/modules/mapping.py",
"snippet": "class MappingNet(nn.Module):\n def __init__(self, coeff_nc, descriptor_nc, layer, num_kp, num_bins):\n super( MappingNet, self).__init__()\n\n self.layer = layer\n nonlinearity = nn.LeakyReLU(0.1)\n\n self.first = nn.Sequential(\n torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True))\n\n for i in range(layer):\n net = nn.Sequential(nonlinearity,\n torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3))\n setattr(self, 'encoder' + str(i), net) \n\n self.pooling = nn.AdaptiveAvgPool1d(1)\n self.output_nc = descriptor_nc\n\n self.fc_roll = nn.Linear(descriptor_nc, num_bins)\n self.fc_pitch = nn.Linear(descriptor_nc, num_bins)\n self.fc_yaw = nn.Linear(descriptor_nc, num_bins)\n self.fc_t = nn.Linear(descriptor_nc, 3)\n self.fc_exp = nn.Linear(descriptor_nc, 3*num_kp)\n\n def forward(self, input_3dmm):\n out = self.first(input_3dmm)\n for i in range(self.layer):\n model = getattr(self, 'encoder' + str(i))\n out = model(out) + out[:,:,3:-3]\n out = self.pooling(out)\n out = out.view(out.shape[0], -1)\n #print('out:', out.shape)\n\n yaw = self.fc_yaw(out)\n pitch = self.fc_pitch(out)\n roll = self.fc_roll(out)\n t = self.fc_t(out)\n exp = self.fc_exp(out)\n\n return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} "
},
{
"identifier": "OcclusionAwareGenerator",
"path": "src/facerender/modules/generator.py",
"snippet": "class OcclusionAwareGenerator(nn.Module):\n \"\"\"\n Generator follows NVIDIA architecture.\n \"\"\"\n\n def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,\n num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):\n super(OcclusionAwareGenerator, self).__init__()\n\n if dense_motion_params is not None:\n self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,\n estimate_occlusion_map=estimate_occlusion_map,\n **dense_motion_params)\n else:\n self.dense_motion_network = None\n\n self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))\n\n down_blocks = []\n for i in range(num_down_blocks):\n in_features = min(max_features, block_expansion * (2 ** i))\n out_features = min(max_features, block_expansion * (2 ** (i + 1)))\n down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))\n self.down_blocks = nn.ModuleList(down_blocks)\n\n self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)\n\n self.reshape_channel = reshape_channel\n self.reshape_depth = reshape_depth\n\n self.resblocks_3d = torch.nn.Sequential()\n for i in range(num_resblocks):\n self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))\n\n out_features = block_expansion * (2 ** (num_down_blocks))\n self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)\n self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)\n\n self.resblocks_2d = torch.nn.Sequential()\n for i in range(num_resblocks):\n self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))\n\n up_blocks = []\n for i in range(num_down_blocks):\n in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))\n out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1)))\n up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))\n self.up_blocks = nn.ModuleList(up_blocks)\n\n self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3))\n self.estimate_occlusion_map = estimate_occlusion_map\n self.image_channel = image_channel\n\n def deform_input(self, inp, deformation):\n _, d_old, h_old, w_old, _ = deformation.shape\n _, _, d, h, w = inp.shape\n if d_old != d or h_old != h or w_old != w:\n deformation = deformation.permute(0, 4, 1, 2, 3)\n deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')\n deformation = deformation.permute(0, 2, 3, 4, 1)\n return F.grid_sample(inp, deformation)\n\n def forward(self, source_image, kp_driving, kp_source):\n # Encoding (downsampling) part\n out = self.first(source_image)\n for i in range(len(self.down_blocks)):\n out = self.down_blocks[i](out)\n out = self.second(out)\n bs, c, h, w = out.shape\n # print(out.shape)\n feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) \n feature_3d = self.resblocks_3d(feature_3d)\n\n # Transforming feature representation according to deformation and occlusion\n output_dict = {}\n if self.dense_motion_network is not None:\n dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,\n kp_source=kp_source)\n output_dict['mask'] = dense_motion['mask']\n\n if 
'occlusion_map' in dense_motion:\n occlusion_map = dense_motion['occlusion_map']\n output_dict['occlusion_map'] = occlusion_map\n else:\n occlusion_map = None\n deformation = dense_motion['deformation']\n out = self.deform_input(feature_3d, deformation)\n\n bs, c, d, h, w = out.shape\n out = out.view(bs, c*d, h, w)\n out = self.third(out)\n out = self.fourth(out)\n\n if occlusion_map is not None:\n if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:\n occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')\n out = out * occlusion_map\n\n # output_dict[\"deformed\"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image\n\n # Decoding part\n out = self.resblocks_2d(out)\n for i in range(len(self.up_blocks)):\n out = self.up_blocks[i](out)\n out = self.final(out)\n out = F.sigmoid(out)\n\n output_dict[\"prediction\"] = out\n\n return output_dict"
},
{
"identifier": "OcclusionAwareSPADEGenerator",
"path": "src/facerender/modules/generator.py",
"snippet": "class OcclusionAwareSPADEGenerator(nn.Module):\n\n def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,\n num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):\n super(OcclusionAwareSPADEGenerator, self).__init__()\n\n if dense_motion_params is not None:\n self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,\n estimate_occlusion_map=estimate_occlusion_map,\n **dense_motion_params)\n else:\n self.dense_motion_network = None\n\n self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))\n\n down_blocks = []\n for i in range(num_down_blocks):\n in_features = min(max_features, block_expansion * (2 ** i))\n out_features = min(max_features, block_expansion * (2 ** (i + 1)))\n down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))\n self.down_blocks = nn.ModuleList(down_blocks)\n\n self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)\n\n self.reshape_channel = reshape_channel\n self.reshape_depth = reshape_depth\n\n self.resblocks_3d = torch.nn.Sequential()\n for i in range(num_resblocks):\n self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))\n\n out_features = block_expansion * (2 ** (num_down_blocks))\n self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)\n self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)\n\n self.estimate_occlusion_map = estimate_occlusion_map\n self.image_channel = image_channel\n\n self.decoder = SPADEDecoder()\n\n def deform_input(self, inp, deformation):\n _, d_old, h_old, w_old, _ = deformation.shape\n _, _, d, h, w = inp.shape\n if d_old != d or h_old != h or w_old != w:\n deformation = deformation.permute(0, 4, 1, 2, 3)\n deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')\n deformation = deformation.permute(0, 2, 3, 4, 1)\n return F.grid_sample(inp, deformation)\n\n def forward(self, source_image, kp_driving, kp_source):\n # Encoding (downsampling) part\n out = self.first(source_image)\n for i in range(len(self.down_blocks)):\n out = self.down_blocks[i](out)\n out = self.second(out)\n bs, c, h, w = out.shape\n # print(out.shape)\n feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) \n feature_3d = self.resblocks_3d(feature_3d)\n\n # Transforming feature representation according to deformation and occlusion\n output_dict = {}\n if self.dense_motion_network is not None:\n dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,\n kp_source=kp_source)\n output_dict['mask'] = dense_motion['mask']\n\n # import pdb; pdb.set_trace()\n\n if 'occlusion_map' in dense_motion:\n occlusion_map = dense_motion['occlusion_map']\n output_dict['occlusion_map'] = occlusion_map\n else:\n occlusion_map = None\n deformation = dense_motion['deformation']\n out = self.deform_input(feature_3d, deformation)\n\n bs, c, d, h, w = out.shape\n out = out.view(bs, c*d, h, w)\n out = self.third(out)\n out = self.fourth(out)\n\n # occlusion_map = torch.where(occlusion_map < 0.95, 0, occlusion_map)\n \n if occlusion_map is not None:\n if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:\n occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], 
mode='bilinear')\n out = out * occlusion_map\n\n # Decoding part\n out = self.decoder(out)\n\n output_dict[\"prediction\"] = out\n \n return output_dict"
},
{
"identifier": "make_animation",
"path": "src/facerender/modules/make_animation.py",
"snippet": "def make_animation(args, audio_path, save_dir, video_name, img_size, crop_info, source_image, source_semantics, target_semantics,\n generator, kp_detector, he_estimator, mapping, \n yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None,\n use_exp=True, use_half=False):\n import tempfile\n temp_dir = tempfile.gettempdir()\n temp_dir = Path(temp_dir)/'sad'\n # temp_dir = Path.cwd()/'sad'\n print('temp dir',temp_dir)\n frame_dir = temp_dir/'frames'\n remove_directory_contents(str(temp_dir))\n frame_dir.mkdir(exist_ok=True, parents=True)\n print(f'tempdir: {temp_dir}\\nframedir: {frame_dir}')\n with torch.no_grad():\n kp_canonical = kp_detector(source_image)\n he_source = mapping(source_semantics)\n kp_source = keypoint_transformation(kp_canonical, he_source)\n \n for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'):\n # still check the dimension\n target_semantics_frame = target_semantics[:, frame_idx]\n he_driving = mapping(target_semantics_frame)\n if yaw_c_seq is not None:\n he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]\n if pitch_c_seq is not None:\n he_driving['pitch_in'] = pitch_c_seq[:, frame_idx] \n if roll_c_seq is not None:\n he_driving['roll_in'] = roll_c_seq[:, frame_idx] \n \n kp_driving = keypoint_transformation(kp_canonical, he_driving)\n \n kp_norm = kp_driving\n out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)\n video=[]\n for img in out['prediction']:\n image = np.transpose(img.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n original_size = crop_info[0]\n if original_size:\n # print(f'original size: {original_size}. resizing...')\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n for i, frame in enumerate(result):\n cv2.imwrite(str(frame_dir/f'{i}_{frame_idx:04d}.png'), frame[:,:,::-1])\n\n # write png to mp4\n size1, size2= frame.shape[:2]\n path = os.path.join(str(save_dir), 'temp_' + video_name + '.avi')\n print(f'video size {size1, size2}')\n openVideo = cv2.VideoWriter(path, \n cv2.VideoWriter_fourcc(*'DIVX'), 25, (size2, size1))\n # openVideo = cv2.VideoWriter(path, -1, 25, (size2, size1))\n for pngFile in frame_dir.iterdir():\n if pngFile.suffix!=\".png\": continue\n f = cv2.imread(str(pngFile))\n # print(f)\n openVideo.write(f)\n openVideo.release()\n print(f'succesfully wrote png to mp4 at {path}')\n # video_name_full = video_name + '_full.mp4'\n # full_video_path = temp_dir/video_name_full\n # print(f'full_video_path final video: {full_video_path}')\n # new_audio_path = audio_path\n # return_path = full_video_path\n print('Pasting faces back into frame (SeamlessClone)')\n full_video_path = paste_pic_stream(temp_dir, path, args.source_image, crop_info, extended_crop= True if 'ext' in args.preprocess.lower() else False)\n print(f'full video path {full_video_path}')\n full_video_path_final = temp_dir/f'{video_name}_full_audio.mp4'\n # predictions.append(out['prediction'])\n # predictions_ts = torch.stack(predictions, dim=1)\n import subprocess\n import platform\n print(f'final video {full_video_path_final}')\n command = 'ffmpeg -y -init_hw_device cuda -hwaccel nvdec -hwaccel_output_format cuda -i {} -i {} -c:v h264_nvenc -preset:v p1 {}'.format(audio_path, str(full_video_path), str(full_video_path_final))\n subprocess.call(command, shell=platform.system() != 'Windows')\n return full_video_path_final, temp_dir"
},
{
"identifier": "enhancer_generator_with_len",
"path": "src/utils/face_enhancer.py",
"snippet": "def enhancer_generator_with_len(images, method='gfpgan', bg_upsampler='realesrgan'):\n \"\"\" Provide a generator with a __len__ method so that it can passed to functions that\n call len()\"\"\"\n\n if os.path.isfile(images): # handle video to images\n # TODO: Create a generator version of load_video_to_cv2\n images = load_video_to_cv2(images)\n\n gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)\n gen_with_len = GeneratorWithLen(gen, len(images))\n return gen_with_len"
},
{
"identifier": "enhancer_list",
"path": "src/utils/face_enhancer.py",
"snippet": "def enhancer_list(images, method='gfpgan', bg_upsampler='realesrgan'):\n gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)\n return list(gen)"
},
{
"identifier": "paste_pic_stream",
"path": "src/utils/paste_pic.py",
"snippet": "def paste_pic_stream(temp_dir, video_path, pic_path, crop_info, extended_crop=False):\n \n if not os.path.isfile(pic_path):\n raise ValueError('pic_path must be a valid path to video/image file')\n elif pic_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:\n # loader for first frame\n full_img = cv2.imread(pic_path)\n print(f'source image: {pic_path}')\n else:\n # loader for videos\n input_stream = cv2.VideoCapture(pic_path)\n fps = input_stream.get(cv2.CAP_PROP_FPS)\n while 1:\n still_reading, frame = input_stream.read()\n if not still_reading:\n input_stream.release()\n break \n break \n full_img = frame\n frame_h = full_img.shape[0]\n frame_w = full_img.shape[1]\n print(f'reading {video_path}')\n video_stream = cv2.VideoCapture(video_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n \n r_w, r_h = crop_info[0]\n clx, cly, crx, cry = crop_info[1]\n lx, ly, rx, ry = crop_info[2]\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n\n if extended_crop:\n oy1, oy2, ox1, ox2 = cly, cry, clx, crx\n else:\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n\n # tmp_path = str(uuid.uuid4())+'.avi'\n tmp_path = 'full.avi'\n tmp_path = temp_dir/tmp_path\n # print(f'temppath: {tmp_path}')\n out_tmp = cv2.VideoWriter(str(tmp_path), cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))\n\n while True:\n ret, crop_frame = video_stream.read()\n if not ret:\n print(\"End of video stream.\")\n break\n p = cv2.resize(crop_frame.astype(np.uint8), (ox2-ox1, oy2 - oy1)) \n mask = 255*np.ones(p.shape, p.dtype)\n location = ((ox1+ox2) // 2, (oy1+oy2) // 2)\n gen_img = cv2.seamlessClone(p, full_img, mask, location, cv2.NORMAL_CLONE)\n out_tmp.write(gen_img)\n \n video_stream.release()\n out_tmp.release()\n cv2.destroyAllWindows()\n return tmp_path\n # print(tmp_path, new_audio_path, full_video_path)\n # save_video_with_watermark(str(tmp_path), new_audio_path, full_video_path, watermark=False)\n # os.remove(tmp_path)"
},
{
"identifier": "save_video_with_watermark",
"path": "src/utils/videoio.py",
"snippet": "def save_video_with_watermark(video, audio, save_path, watermark=False):\n temp_file = str(uuid.uuid4())+'.mp4'\n cmd = r'ffmpeg -y -hide_banner -loglevel error -i \"%s\" -i \"%s\" -vcodec copy \"%s\"' % (video, audio, temp_file)\n os.system(cmd)\n\n if watermark is False:\n shutil.move(temp_file, save_path)\n else:\n # watermark\n try:\n ##### check if stable-diffusion-webui\n import webui\n from modules import paths\n watarmark_path = paths.script_path+\"/extensions/SadTalker/docs/sadtalker_logo.png\"\n except:\n # get the root path of sadtalker.\n dir_path = os.path.dirname(os.path.realpath(__file__))\n watarmark_path = dir_path+\"/../../docs/sadtalker_logo.png\"\n\n cmd = r'ffmpeg -y -hide_banner -loglevel error -i \"%s\" -i \"%s\" -filter_complex \"[1]scale=100:-1[wm];[0][wm]overlay=(main_w-overlay_w)-10:10\" \"%s\"' % (temp_file, watarmark_path, save_path)\n os.system(cmd)\n os.remove(temp_file)"
}
] | import os
import cv2
import yaml
import numpy as np
import warnings
import safetensors
import safetensors.torch
import imageio
import torch
import torchvision
import webui # in webui
from skimage import img_as_ubyte
from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
from src.facerender.modules.mapping import MappingNet
from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
from src.facerender.modules.make_animation import make_animation
from pydub import AudioSegment
from src.utils.face_enhancer import enhancer_generator_with_len, enhancer_list
from src.utils.paste_pic import paste_pic_stream
from src.utils.videoio import save_video_with_watermark | 9,008 | self.kp_extractor.eval()
self.generator.eval()
self.he_estimator.eval()
self.mapping.eval()
self.device = device
def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
kp_detector=None, he_estimator=None,
device="cpu"):
checkpoint = safetensors.torch.load_file(checkpoint_path)
if generator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'generator' in k:
x_generator[k.replace('generator.', '')] = v
generator.load_state_dict(x_generator)
if kp_detector is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'kp_extractor' in k:
x_generator[k.replace('kp_extractor.', '')] = v
kp_detector.load_state_dict(x_generator)
if he_estimator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'he_estimator' in k:
x_generator[k.replace('he_estimator.', '')] = v
he_estimator.load_state_dict(x_generator)
return None
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
kp_detector=None, he_estimator=None, optimizer_generator=None,
optimizer_discriminator=None, optimizer_kp_detector=None,
optimizer_he_estimator=None, device="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if generator is not None:
generator.load_state_dict(checkpoint['generator'])
if kp_detector is not None:
kp_detector.load_state_dict(checkpoint['kp_detector'])
if he_estimator is not None:
he_estimator.load_state_dict(checkpoint['he_estimator'])
if discriminator is not None:
try:
discriminator.load_state_dict(checkpoint['discriminator'])
except:
                print ('No discriminator in the state-dict. Discriminator will be randomly initialized')
if optimizer_generator is not None:
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
if optimizer_discriminator is not None:
try:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
except RuntimeError as e:
print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')
if optimizer_kp_detector is not None:
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
if optimizer_he_estimator is not None:
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
return checkpoint['epoch']
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if mapping is not None:
mapping.load_state_dict(checkpoint['mapping'])
if discriminator is not None:
discriminator.load_state_dict(checkpoint['discriminator'])
if optimizer_mapping is not None:
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
if optimizer_discriminator is not None:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
return checkpoint['epoch']
def generate(self, args, x, save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
source_image=x['source_image'].type(torch.FloatTensor)
source_semantics=x['source_semantics'].type(torch.FloatTensor)
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
source_image=source_image.to(self.device)
source_semantics=source_semantics.to(self.device)
target_semantics=target_semantics.to(self.device)
if 'yaw_c_seq' in x:
yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
yaw_c_seq = x['yaw_c_seq'].to(self.device)
else:
yaw_c_seq = None
if 'pitch_c_seq' in x:
pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
pitch_c_seq = x['pitch_c_seq'].to(self.device)
else:
pitch_c_seq = None
if 'roll_c_seq' in x:
roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
roll_c_seq = x['roll_c_seq'].to(self.device)
else:
roll_c_seq = None
frame_num = x['frame_num']
audio_path = x['audio_path']
video_name = x['video_name']
full_video_path, temp_dir = make_animation(args, audio_path, save_dir, video_name, img_size, crop_info, source_image, source_semantics, target_semantics,
self.generator, self.kp_extractor, self.he_estimator, self.mapping,
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
#### paste back then enhancers
if enhancer:
video_name_enhancer = x['video_name'] + '_enhanced.mp4'
enhanced_path = os.path.join(save_dir, 'temp_'+video_name_enhancer)
av_path_enhancer = os.path.join(save_dir, video_name_enhancer)
return_path = av_path_enhancer
try:
enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
except:
| warnings.filterwarnings('ignore')
try:
in_webui = True
except:
in_webui = False
class AnimateFromCoeff():
def __init__(self, sadtalker_path, device):
with open(sadtalker_path['facerender_yaml']) as f:
config = yaml.safe_load(f)
generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
**config['model_params']['common_params'])
mapping = MappingNet(**config['model_params']['mapping_params'])
generator.to(device)
kp_extractor.to(device)
he_estimator.to(device)
mapping.to(device)
for param in generator.parameters():
param.requires_grad = False
for param in kp_extractor.parameters():
param.requires_grad = False
for param in he_estimator.parameters():
param.requires_grad = False
for param in mapping.parameters():
param.requires_grad = False
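# Note: every sub-network is frozen here (requires_grad=False) and switched to eval() further below; this class is used only for inference with pretrained weights.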
if sadtalker_path is not None:
if 'checkpoint' in sadtalker_path: # use safe tensor
self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)
else:
self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
else:
raise AttributeError("Checkpoint should be specified for video head pose estimator.")
if sadtalker_path['mappingnet_checkpoint'] is not None:
self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)
else:
raise AttributeError("Checkpoint should be specified for video head pose estimator.")
self.kp_extractor = kp_extractor
self.generator = generator
self.he_estimator = he_estimator
self.mapping = mapping
self.kp_extractor.eval()
self.generator.eval()
self.he_estimator.eval()
self.mapping.eval()
self.device = device
def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
kp_detector=None, he_estimator=None,
device="cpu"):
checkpoint = safetensors.torch.load_file(checkpoint_path)
if generator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'generator' in k:
x_generator[k.replace('generator.', '')] = v
generator.load_state_dict(x_generator)
if kp_detector is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'kp_extractor' in k:
x_generator[k.replace('kp_extractor.', '')] = v
kp_detector.load_state_dict(x_generator)
if he_estimator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'he_estimator' in k:
x_generator[k.replace('he_estimator.', '')] = v
he_estimator.load_state_dict(x_generator)
return None
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
kp_detector=None, he_estimator=None, optimizer_generator=None,
optimizer_discriminator=None, optimizer_kp_detector=None,
optimizer_he_estimator=None, device="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if generator is not None:
generator.load_state_dict(checkpoint['generator'])
if kp_detector is not None:
kp_detector.load_state_dict(checkpoint['kp_detector'])
if he_estimator is not None:
he_estimator.load_state_dict(checkpoint['he_estimator'])
if discriminator is not None:
try:
discriminator.load_state_dict(checkpoint['discriminator'])
except:
print('No discriminator in the state-dict. Discriminator will be randomly initialized')
if optimizer_generator is not None:
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
if optimizer_discriminator is not None:
try:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
except RuntimeError as e:
print('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
if optimizer_kp_detector is not None:
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
if optimizer_he_estimator is not None:
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
return checkpoint['epoch']
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if mapping is not None:
mapping.load_state_dict(checkpoint['mapping'])
if discriminator is not None:
discriminator.load_state_dict(checkpoint['discriminator'])
if optimizer_mapping is not None:
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
if optimizer_discriminator is not None:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
return checkpoint['epoch']
def generate(self, args, x, save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
source_image=x['source_image'].type(torch.FloatTensor)
source_semantics=x['source_semantics'].type(torch.FloatTensor)
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
source_image=source_image.to(self.device)
source_semantics=source_semantics.to(self.device)
target_semantics=target_semantics.to(self.device)
if 'yaw_c_seq' in x:
yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
yaw_c_seq = yaw_c_seq.to(self.device)
else:
yaw_c_seq = None
if 'pitch_c_seq' in x:
pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
pitch_c_seq = pitch_c_seq.to(self.device)
else:
pitch_c_seq = None
if 'roll_c_seq' in x:
roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
roll_c_seq = roll_c_seq.to(self.device)
else:
roll_c_seq = None
frame_num = x['frame_num']
audio_path = x['audio_path']
video_name = x['video_name']
full_video_path, temp_dir = make_animation(args, audio_path, save_dir, video_name, img_size, crop_info, source_image, source_semantics, target_semantics,
self.generator, self.kp_extractor, self.he_estimator, self.mapping,
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
#### paste back, then apply enhancers
if enhancer:
video_name_enhancer = x['video_name'] + '_enhanced.mp4'
enhanced_path = os.path.join(save_dir, 'temp_'+video_name_enhancer)
av_path_enhancer = os.path.join(save_dir, video_name_enhancer)
return_path = av_path_enhancer
try:
enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)
imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))
except: | enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer) | 7 | 2023-12-19 11:01:35+00:00 | 12k |
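The AnimateFromCoeff loaders in the record above (load_cpk_facevid2vid_safetensor in particular) rebuild per-module state dicts from one flat checkpoint by filtering on key prefixes and stripping them before load_state_dict. A minimal sketch of that key-splitting pattern, written against a plain dict so it stays self-contained (the helper name and example prefixes are illustrative, not part of the record):

def split_state_dict_by_prefix(checkpoint, prefix):
    # Keep entries whose key contains `prefix` and strip "<prefix>." so the
    # sub-module's load_state_dict() accepts the remaining parameter names.
    return {k.replace(prefix + '.', ''): v for k, v in checkpoint.items() if prefix in k}

# Hypothetical usage mirroring the loader above:
# generator.load_state_dict(split_state_dict_by_prefix(checkpoint, 'generator'))
# kp_detector.load_state_dict(split_state_dict_by_prefix(checkpoint, 'kp_extractor'))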
Angryrou/udao | udao/optimization/moo/weighted_sum.py | [
{
"identifier": "Objective",
"path": "udao/optimization/concepts/objective.py",
"snippet": "class Objective(Constraint):\n \"\"\"\n\n Parameters\n ----------\n name : str\n Name of the objective.\n minimize : bool\n Direction of the objective: if True, minimize, else maximize.\n type: VarTypes\n Type of the objective, by default VarTypes.FLOAT\n \"\"\"\n\n def __init__(\n self,\n name: str,\n minimize: bool,\n function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],\n lower: Optional[float] = None,\n upper: Optional[float] = None,\n type: VarTypes = VarTypes.FLOAT,\n ):\n super().__init__(function=function, lower=lower, upper=upper)\n self.name = name\n self.minimize = minimize\n self.type = type\n\n @property\n def direction(self) -> int:\n \"\"\"Get gradient direction from optimization type\"\"\"\n if self.minimize:\n return 1\n else:\n return -1\n\n def __repr__(self) -> str:\n return (\n f\"Objective(name={self.name}, \"\n f\"direction={'min' if self.minimize else 'max'}, \"\n f\"lower={self.lower}, upper={self.upper})\"\n )"
},
{
"identifier": "MOProblem",
"path": "udao/optimization/concepts/problem.py",
"snippet": "class MOProblem(BaseProblem):\n \"\"\"Multi-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objectives: Sequence[Objective],\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objectives = objectives\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"MOProblem(objectives={self.objectives}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n f\"input_parameters={self.input_parameters})\"\n )"
},
{
"identifier": "MOGD",
"path": "udao/optimization/soo/mogd.py",
"snippet": "class MOGD(SOSolver):\n \"\"\"MOGD solver for single-objective optimization.\n\n Performs gradient descent on input variables by minimizing an\n objective loss and a constraint loss.\n \"\"\"\n\n @dataclass\n class Params:\n learning_rate: float\n \"\"\"learning rate of Adam optimizer applied to input variables\"\"\"\n max_iters: int\n \"\"\"maximum number of iterations for a single local search\"\"\"\n patience: int\n \"\"\"maximum number of iterations without improvement\"\"\"\n multistart: int\n \"\"\"number of random starts for gradient descent\"\"\"\n objective_stress: float = 10.0\n \"\"\"stress term for objective functions\"\"\"\n constraint_stress: float = 1e5\n \"\"\"stress term for constraint functions\"\"\"\n strict_rounding: bool = False\n \"\"\"whether strictly rounding integer variables at each iteration. \"\"\"\n batch_size: int = 1\n \"\"\"batch size for gradient descent\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n dtype: th.dtype = th.float32\n \"\"\"type of the tensors\"\"\"\n\n def __init__(self, params: Params) -> None:\n super().__init__()\n self.lr = params.learning_rate\n self.max_iter = params.max_iters\n self.patience = params.patience\n self.multistart = params.multistart\n self.objective_stress = params.objective_stress\n self.constraint_stress = params.constraint_stress\n self.strict_rounding = params.strict_rounding\n self.batch_size = params.batch_size\n self.device = params.device\n self.dtype = params.dtype\n\n def _get_unprocessed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[Dict[str, th.Tensor], Dict[str, Any]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables for which to get random values\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[Dict[str, th.Tensor], Dict[str, Any]]\n - random values as a tensor for each numeric variable\n - input parameters valuies\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n return derive_unprocessed_input(\n input_variables=numeric_values,\n input_parameters=input_parameters,\n device=self.device,\n )\n\n def _get_processed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[UdaoInput, UdaoItemShape, Callable[[th.Tensor], TabularContainer]]:\n \"\"\"Get random values for numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInputShape, Callable[[th.Tensor], TabularContainer]]\n - random values for numeric variables\n - shape of the input\n - function to convert a tensor to a 
TabularContainer\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n input_data, iterator = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n make_tabular_container = cast(\n UdaoIterator, iterator\n ).get_tabular_features_container\n\n input_data_shape = iterator.shape\n\n return (\n input_data,\n input_data_shape,\n make_tabular_container,\n )\n\n def _get_unprocessed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n ) -> Tuple[Dict[str, float], Dict[str, float]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Variables for which to get bounds\n\n Returns\n -------\n Tuple[Dict[str, float], Dict[str, float]]\n - lower bounds of numeric variables\n - upper bounds of numeric variables\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n return lower_numeric_values, upper_numeric_values\n\n def _get_processed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> Tuple[UdaoInput, UdaoInput]:\n \"\"\"Get bounds of numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Input parameters, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInput]\n Lower and upper bounds of numeric\n variables in the form of a UdaoInput\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n lower_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=lower_numeric_values,\n )\n upper_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=upper_numeric_values,\n )\n if self.device:\n return lower_input.to(self.device), upper_input.to(self.device)\n else:\n return lower_input, upper_input\n\n def _gradient_descent(\n self,\n problem: co.SOProblem,\n input_data: Union[UdaoInput, Dict],\n optimizer: th.optim.Optimizer,\n ) -> Tuple[int, float, float]:\n \"\"\"Perform a gradient descent step on input variables\n\n Parameters\n ----------\n problem : co.SOProblem\n Single-objective optimization problem\n input_data : Union[UdaoInput, Dict]\n Input data - can have different types depending on whether\n the input variables are processed or not.\n - UdaoInput: the naive input\n - Dict: {\"input_variables\": ..., \"input_parameters\": ...}\n\n optimizer : th.optim.Optimizer\n PyTorch optimizer\n\n Returns\n -------\n Tuple[int, float, float]\n - index of minimum loss\n - minimum loss\n - objective value at minimum loss\n\n Raises\n ------\n UncompliantSolutionError\n If no solution within bounds is found\n \"\"\"\n # Compute objective, 
constraints and corresponding losses\n\n loss_meta = self._compute_loss(problem, input_data)\n sum_loss = loss_meta[\"sum_loss\"]\n min_loss = loss_meta[\"min_loss\"]\n min_loss_id = loss_meta[\"min_loss_id\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n\n optimizer.zero_grad()\n sum_loss.backward() # type: ignore\n optimizer.step()\n\n if is_within_constraint and (\n self.within_objective_bounds(best_obj, problem.objective)\n ):\n return min_loss_id, min_loss, best_obj\n else:\n raise UncompliantSolutionError(\"No solution within bounds found!\")\n\n def _log_success(\n self,\n problem: co.SOProblem,\n iter: int,\n best_obj: float,\n best_iter: int,\n best_feature_input: Any,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, best local {problem.objective.name} \"\n f\"found {best_obj:.5f}\"\n f\" \\nat iteration {best_iter},\"\n f\" \\nwith vars: {best_feature_input}, for \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _log_failure(\n self,\n problem: co.SOProblem,\n iter: int,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, no valid {problem.objective.name}\"\n f\" found for input parameters {problem.input_parameters} with \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _unprocessed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n no data processor is defined.\n The input variables are transformed to a dictionary of tensors and are\n optimized directly, by being passed to the objective function along\n with the input parameters.\n \"\"\"\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[Dict[str, th.Tensor]] = None\n\n (\n input_variable_values,\n input_parameter_values,\n ) = self._get_unprocessed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n lower_input, upper_input = self._get_unprocessed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables)\n )\n for name in input_variable_values:\n input_variable_values[name].requires_grad_(True)\n optimizer = optim.Adam([t for t in input_variable_values.values()], lr=self.lr)\n i = 0\n while i < self.max_iter:\n with th.no_grad():\n input_variable_values_backup = {\n k: v.detach().clone() for k, v in input_variable_values.items()\n }\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n {\n \"input_variables\": input_variable_values,\n \"input_parameters\": input_parameter_values,\n },\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = {\n k: v[min_loss_id].reshape(1, -1)\n for k, v in input_variable_values_backup.items()\n }\n best_iter = i\n\n with th.no_grad():\n # Update input_variable_values with constrained values\n for k in input_variable_values:\n input_variable_values[k].data = th.clip(\n input_variable_values[k].data,\n lower_input[k],\n upper_input[k],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n for k in input_variable_values:\n if isinstance(problem.variables[k], co.IntegerVariable):\n input_variable_values[k].data = input_variable_values[\n k\n 
].data.round()\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n if not self.strict_rounding:\n for k in best_feature_input:\n if isinstance(problem.variables[k], co.IntegerVariable):\n best_feature_input[k].data = best_feature_input[k].data.round()\n loss_meta = self._compute_loss(\n problem,\n {\n \"input_variables\": best_feature_input,\n \"input_parameters\": input_parameter_values,\n },\n )\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n\n best_raw_vars = {\n name: best_feature_input[name]\n .cpu()\n .numpy()\n .squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _processed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n a data processor is defined.\n\n input variables and parameters are processed by the data processor.\n Gradient descent is performed on the processed input variables.\n Variables are then inverse transformed to get the raw variables.\n \"\"\"\n if not problem.data_processor:\n raise Exception(\"Data processor is not defined!\")\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[th.Tensor] = None\n # Random numeric variables and their characteristics\n (\n input_data,\n input_data_shape,\n make_tabular_container,\n ) = self._get_processed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n # Bounds of numeric variables\n lower_input, upper_input = self._get_processed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n )\n # Indices of numeric variables on which to apply gradients\n mask = th.tensor(\n [i in problem.variables for i in input_data_shape.feature_names],\n device=self.device,\n )\n grad_indices = th.nonzero(mask, as_tuple=False).squeeze()\n input_vars_subvector = input_data.features[:, grad_indices].clone().detach()\n input_vars_subvector.requires_grad_(True)\n\n optimizer = optim.Adam([input_vars_subvector], lr=self.lr)\n i = 0\n while i < self.max_iter:\n input_data.features = input_data.features.clone().detach()\n input_data.features[:, grad_indices] = input_vars_subvector\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n input_data,\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = (\n input_data.features.detach()[min_loss_id].clone().reshape(1, -1)\n )\n best_iter = i\n\n with th.no_grad():\n # Update input_vars_subvector with constrained values\n input_vars_subvector.data = th.clip(\n input_vars_subvector.data,\n # Use .data to avoid gradient tracking during update\n 
lower_input.features[0, grad_indices],\n upper_input.features[0, grad_indices],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n input_data.features[:, grad_indices] = input_vars_subvector.data\n feature_container = make_tabular_container(\n input_data.features.detach()\n )\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n numeric_values: Dict[str, np.ndarray] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n input_vars_subvector.data = input_data_raw.features[:, grad_indices]\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n with th.no_grad():\n best_feature_input = cast(th.Tensor, best_feature_input)\n feature_container = make_tabular_container(best_feature_input)\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n if not self.strict_rounding:\n best_raw_vars: Dict[str, Any] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_best_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=best_raw_vars,\n device=self.device,\n )\n loss_meta = self._compute_loss(problem, input_data_best_raw)\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n else:\n best_raw_vars = {\n name: best_raw_df[[name]]\n .values.squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization.\n Categorical variables are fixed to the values in input_parameters.\n (a grid search of categorical variables is performed in solve)\n This is where gradient descent is performed.\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n objective : co.Objective\n Objective to be optimized\n constraints : Sequence[co.Constraint]\n Constraints to be satisfied\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed: int, by default None\n random seed\n\n Returns\n -------\n Tuple[float, Dict[str, float], flat]\n - objective value\n - variables\n - best loss value\n\n Raises\n ------\n NoSolutionError\n No valid solution is found\n \"\"\"\n\n if not problem.data_processor:\n return self._unprocessed_single_start_opt(problem, seed=seed)\n else:\n 
return self._processed_single_start_opt(problem, seed=seed)\n\n def solve(\n self, problem: co.SOProblem, seed: Optional[int] = None\n ) -> Tuple[float, Dict[str, float]]:\n if seed is not None:\n th.manual_seed(seed)\n if self.device:\n for constraint in problem.constraints:\n constraint.to(self.device)\n problem.objective.to(self.device)\n\n categorical_variables = [\n name\n for name, variable in problem.variables.items()\n if isinstance(variable, co.EnumVariable)\n ]\n numeric_variables = {\n name: variable\n for name, variable in problem.variables.items()\n if isinstance(variable, co.NumericVariable)\n }\n\n meshed_categorical_vars = self.get_meshed_categorical_vars(problem.variables)\n\n if meshed_categorical_vars is None:\n meshed_categorical_vars = np.array([0])\n\n best_loss_list: List[float] = []\n obj_list: List[float] = []\n vars_list: List[Dict] = []\n for i in range(self.multistart):\n for categorical_cell in meshed_categorical_vars:\n categorical_values = {\n name: categorical_cell[ind]\n for ind, name in enumerate(categorical_variables)\n } # from {id: value} to {name: value}\n fixed_values = {\n **categorical_values,\n **(problem.input_parameters or {}),\n }\n try:\n (\n obj_pred,\n best_raw_vars,\n best_loss,\n ) = self._single_start_opt(\n co.SOProblem(\n variables=numeric_variables, # type: ignore\n input_parameters=fixed_values,\n objective=problem.objective,\n constraints=problem.constraints or [],\n data_processor=problem.data_processor,\n ),\n seed=seed + i if seed is not None else None,\n )\n except NoSolutionError:\n continue\n else:\n best_loss_list.append(best_loss)\n obj_list.append(obj_pred)\n vars_list.append(best_raw_vars)\n if not obj_list:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n idx = np.argmin(best_loss_list)\n vars_cand = vars_list[idx]\n if vars_cand is not None:\n obj_cand = obj_list[idx]\n if obj_cand is None:\n raise Exception(f\"Unexpected objs_list[{idx}] is None.\")\n else:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n return obj_cand, vars_cand\n\n ##################\n ## _loss ##\n ##################\n def constraints_loss(\n self, constraint_values: List[th.Tensor], constraints: Sequence[co.Constraint]\n ) -> th.Tensor:\n \"\"\"\n compute loss of the values of each constraint function fixme: double-check\n\n Parameters\n ----------\n constraint_values : List[th.Tensor]\n values of each constraint function\n constraints : Sequence[co.Constraint]\n constraint functions\n\n Returns\n -------\n th.Tensor\n loss of the values of each constraint function\n\n \"\"\"\n\n # vars: a tensor\n # get loss for constraint functions defined in the problem setting\n total_loss = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n for i, (constraint_value, constraint) in enumerate(\n zip(constraint_values, constraints)\n ):\n stress = (\n self.objective_stress\n if isinstance(constraint, co.Objective)\n else self.constraint_stress\n )\n constraint_violation = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n if constraint.upper is not None and constraint.lower is not None:\n if constraint.upper == constraint.lower:\n constraint_violation = th.abs(constraint_value - constraint.upper)\n else:\n normed_constraint = (constraint_value - constraint.lower) / (\n constraint.upper - constraint.lower\n )\n constraint_violation = th.where(\n (normed_constraint < 0) | (normed_constraint > 1),\n (normed_constraint - 0.5),\n 0,\n )\n elif constraint.lower 
is not None:\n constraint_violation = th.relu(constraint.lower - constraint_value)\n elif constraint.upper is not None:\n constraint_violation = th.relu(constraint_value - constraint.upper)\n total_loss += (\n constraint_violation**2 + stress * (constraint_violation > 0).float()\n )\n\n return total_loss\n\n def objective_loss(\n self, objective_value: th.Tensor, objective: co.Objective\n ) -> th.Tensor:\n \"\"\"Compute the objective loss for a given objective value:\n - if no bounds are specified, use the squared objective value\n - if both bounds are specified, use the squared normalized\n objective value if it is within the bounds, otherwise\n add a stress term to a squared distance to middle of the bounds\n\n Parameters\n ----------\n objective_value : th.Tensor\n Tensor of objective values\n objective : co.Objective\n Objective function\n\n Returns\n -------\n th.Tensor\n Tensor of objective losses\n\n Raises\n ------\n NotImplementedError\n If only one bound is specified for the objective\n\n \"\"\"\n\n if objective.upper is None and objective.lower is None:\n loss = (\n th.sign(objective_value) * (objective_value**2) * objective.direction\n )\n elif objective.upper is not None and objective.lower is not None:\n norm_cst_obj_pred = (objective_value - objective.lower) / (\n objective.upper - objective.lower\n ) # scaled\n loss = th.where(\n (norm_cst_obj_pred < 0) | (norm_cst_obj_pred > 1),\n (norm_cst_obj_pred - 0.5) ** 2 + self.objective_stress,\n norm_cst_obj_pred * objective.direction,\n )\n else:\n raise NotImplementedError(\"Objective with only one bound is not supported\")\n return loss\n\n def _obj_forward(\n self,\n optimization_element: co.Constraint,\n input_data: Union[UdaoInput, Dict],\n ) -> th.Tensor:\n if isinstance(input_data, UdaoInput):\n return optimization_element.function(input_data) # type: ignore\n else:\n # Dict when unprocessed inputs\n return optimization_element.function(**input_data)\n\n def _compute_loss(\n self, problem: co.SOProblem, input_data: Union[UdaoInput, Dict]\n ) -> Dict[str, Any]:\n obj_output = self._obj_forward(problem.objective, input_data)\n objective_loss = self.objective_loss(obj_output, problem.objective)\n constraint_loss = th.zeros_like(objective_loss, device=self.device)\n\n if problem.constraints:\n const_outputs = [\n self._obj_forward(constraint, input_data)\n for constraint in problem.constraints\n ]\n constraint_loss = self.constraints_loss(const_outputs, problem.constraints)\n\n loss = objective_loss + constraint_loss\n min_loss_id = int(th.argmin(loss).cpu().item())\n\n return {\n \"sum_loss\": th.sum(loss),\n \"min_loss\": th.min(loss).cpu().item(),\n \"min_loss_id\": min_loss_id,\n \"best_obj\": obj_output[min_loss_id].cpu().item(),\n \"is_within_constraint\": bool((constraint_loss[min_loss_id] == 0).item()),\n }\n\n ##################\n ## _get (vars) ##\n ##################\n\n def get_meshed_categorical_vars(\n self, variables: Dict[str, co.Variable]\n ) -> Optional[np.ndarray]:\n \"\"\"\n Get combinations of all categorical (binary, enum) variables\n\n Parameters\n ----------\n variables : Dict[str, co.Variable]\n Variables to be optimized\n\n Returns\n -------\n Optional[np.ndarray]\n Combinations of all categorical variables\n of shape (n_samples, n_vars)\n \"\"\"\n cv_value_list = [\n variable.values\n for variable in variables.values()\n if isinstance(variable, co.EnumVariable)\n ]\n if not cv_value_list:\n return None\n meshed_cv_value_list = [x_.reshape(-1, 1) for x_ in np.meshgrid(*cv_value_list)]\n 
meshed_cv_value = np.concatenate(meshed_cv_value_list, axis=1)\n return meshed_cv_value\n\n ##################\n ## _check ##\n ##################\n\n @staticmethod\n def within_objective_bounds(obj_value: float, objective: co.Objective) -> bool:\n \"\"\"\n check whether violating the objective value var_ranges\n :param pred_dict: dict, keys are objective names,\n values are objective values\n :param obj_bounds: dict, keys are objective names,\n values are lower and upper var_ranges of each objective value\n :return: True or False\n \"\"\"\n within_bounds = True\n if objective.upper is not None:\n within_bounds = obj_value <= objective.upper\n if objective.lower is not None:\n within_bounds = within_bounds and obj_value >= objective.lower\n return within_bounds"
},
{
"identifier": "SOSolver",
"path": "udao/optimization/soo/so_solver.py",
"snippet": "class SOSolver(ABC):\n @abstractmethod\n def solve(\n self,\n problem: SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float]]:\n \"\"\"Solve a single-objective optimization problem\n\n Parameters\n ----------\n problem : SOProblem\n Single-objective optimization problem to solve\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[float, Dict[str, float]]\n A tuple of the objective value and the variables\n that optimize the objective\n \"\"\"\n ..."
},
{
"identifier": "moo_utils",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "class Point:\nclass Rectangle:\n def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:\n def __repr__(self) -> str:\n def __eq__(self, other: \"Point\") -> bool: # type: ignore\n def __init__(self, utopia: Point, nadir: Point) -> None:\n def __repr__(self) -> str:\n def cal_volume(self, upper_bounds: np.ndarray, lower_bounds: np.ndarray) -> float:\n def __lt__(self, other: \"Rectangle\") -> bool:\n def __eq__(self, other: \"Rectangle\") -> bool: # type: ignore\ndef is_pareto_efficient(costs: np.ndarray, return_mask: bool = True) -> np.ndarray:\ndef summarize_ret(\n po_obj_list: Sequence, po_var_list: Sequence\n) -> Tuple[np.ndarray, np.ndarray]:\ndef even_weights(stepsize: float, n_objectives: int) -> np.ndarray:\ndef plot_po(po: np.ndarray, n_obj: int = 2, title: str = \"pf_ap\") -> None:\ndef get_default_device() -> th.device:"
},
{
"identifier": "NoSolutionError",
"path": "udao/optimization/utils/exceptions.py",
"snippet": "class NoSolutionError(ValueError):\n \"Raised when no solution is found for an MOO problem\"\n ..."
},
{
"identifier": "Point",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "class Point:\n def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:\n \"\"\"\n A point in the objective space.\n Variables are optional, and are not specified for imaginary points\n (e.g., utopia and nadir)\n\n Parameters\n ----------\n objs : np.ndarray\n Array of objective values of shape (n_objs,)\n vars :np.ndarray, optional\n Array of variable values of shape (n_vars,), by default None\n \"\"\"\n self.objs = objs\n self.vars = vars\n self.n_objs = objs.shape[0]\n\n def __repr__(self) -> str:\n return f\"Point(objs={self.objs}, vars={self.vars})\"\n\n def __eq__(self, other: \"Point\") -> bool: # type: ignore\n return bool(np.all(self.objs == other.objs) and np.all(self.vars == other.vars))"
},
{
"identifier": "get_default_device",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "def get_default_device() -> th.device:\n return th.device(\"cuda\") if th.cuda.is_available() else th.device(\"cpu\")"
},
{
"identifier": "MOSolver",
"path": "udao/optimization/moo/mo_solver.py",
"snippet": "class MOSolver(ABC):\n def __init__(\n self,\n ) -> None:\n pass\n\n @abstractmethod\n def solve(\n self,\n problem: MOProblem,\n seed: Optional[int] = None,\n ) -> Any:\n \"\"\"_summary_\n\n Parameters\n ----------\n problem : MOProblem\n Multi-objective optimization problem to solve\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Any\n A tuple of the objectives values and the variables\n that optimize the objective\n \"\"\"\n ..."
}
] | import json
import numpy as np
import torch as th
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple
from ..concepts import Objective
from ..concepts.problem import MOProblem
from ..soo.mogd import MOGD
from ..soo.so_solver import SOSolver
from ..utils import moo_utils as moo_ut
from ..utils.exceptions import NoSolutionError
from ..utils.moo_utils import Point, get_default_device
from .mo_solver import MOSolver | 9,647 | ws: List[float],
allow_cache: bool = False,
normalize: bool = True,
device: Optional[th.device] = None,
) -> None:
self.device = device or get_default_device()
self.problem = problem
self.ws = ws
super().__init__(name="weighted_sum", function=self.function, minimize=True)
self._cache: Dict[str, th.Tensor] = {}
self.allow_cache = allow_cache
self.normalize = normalize
def _function(self, *args: Any, **kwargs: Any) -> th.Tensor:
hash_var = ""
if self.allow_cache:
hash_var = json.dumps(str(args) + str(kwargs))
if hash_var in self._cache:
return self._cache[hash_var]
objs: List[th.Tensor] = []
for objective in self.problem.objectives:
obj = objective(*args, **kwargs) * objective.direction
objs.append(obj.squeeze())
objs_tensor = th.vstack(objs).T
# shape (n_feasible_samples/grids, n_objs)
if self.allow_cache:
self._cache[hash_var] = objs_tensor
return objs_tensor
def function(self, *args: Any, **kwargs: Any) -> th.Tensor:
"""Sum of weighted normalized objectives"""
objs_tensor = self._function(*args, **kwargs)
if self.normalize:
objs_tensor = self._normalize_objective(objs_tensor)
return th.sum(objs_tensor * th.tensor(self.ws, device=self.device), dim=1)
def _normalize_objective(self, objs_array: th.Tensor) -> th.Tensor:
"""Normalize objective values to [0, 1]
Parameters
----------
objs_array : th.Tensor
shape (n_feasible_samples/grids, n_objs)
Returns
-------
th.Tensor
shape (n_feasible_samples/grids, n_objs)
Raises
------
NoSolutionError
if lower bounds of objective values are
higher than their upper bounds
"""
objs_min, objs_max = th.min(objs_array, 0).values, th.max(objs_array, 0).values
if th.any((objs_min - objs_max) > 0):
raise NoSolutionError(
"Cannot do normalization! Lower bounds of "
"objective values are higher than their upper bounds."
)
elif th.all((objs_min - objs_max) == 0):
return th.zeros_like(objs_array)
return (objs_array - objs_min) / (objs_max - objs_min)
def to(self, device: Optional[th.device] = None) -> "WeightedSumObjective":
"""Move objective to device"""
if device is None:
device = get_default_device()
self.device = device
for objective in self.problem.objectives:
objective.to(device)
for constraint in self.problem.constraints:
constraint.to(device)
self._cache = {k: v.to(device) for k, v in self._cache.items()}
return self
class WeightedSum(MOSolver):
"""
Weighted Sum (WS) algorithm for MOO
Parameters
----------
ws_pairs: np.ndarray,
weight settings for all objectives, of shape (n_weights, n_objs)
inner_solver: BaseSolver,
the solver used in Weighted Sum
objectives: List[Objective],
objective functions
constraints: List[Constraint],
constraint functions
"""
@dataclass
class Params:
ws_pairs: np.ndarray
"""weight sets for all objectives, of shape (n_weights, n_objs)"""
so_solver: SOSolver
"""solver for SOO"""
normalize: bool = True
"""whether to normalize objective values to [0, 1] before applying WS"""
allow_cache: bool = False
"""whether to cache the objective values"""
device: Optional[th.device] = field(default_factory=get_default_device)
"""device on which to perform torch operations, by default available device."""
def __init__(
self,
params: Params,
):
super().__init__()
self.so_solver = params.so_solver
self.ws_pairs = params.ws_pairs
self.allow_cache = params.allow_cache
self.normalize = params.normalize
self.device = params.device
|
class WeightedSumObjective(Objective):
"""Weighted Sum Objective"""
def __init__(
self,
problem: MOProblem,
ws: List[float],
allow_cache: bool = False,
normalize: bool = True,
device: Optional[th.device] = None,
) -> None:
self.device = device or get_default_device()
self.problem = problem
self.ws = ws
super().__init__(name="weighted_sum", function=self.function, minimize=True)
self._cache: Dict[str, th.Tensor] = {}
self.allow_cache = allow_cache
self.normalize = normalize
def _function(self, *args: Any, **kwargs: Any) -> th.Tensor:
hash_var = ""
if self.allow_cache:
hash_var = json.dumps(str(args) + str(kwargs))
if hash_var in self._cache:
return self._cache[hash_var]
objs: List[th.Tensor] = []
for objective in self.problem.objectives:
obj = objective(*args, **kwargs) * objective.direction
objs.append(obj.squeeze())
objs_tensor = th.vstack(objs).T
# shape (n_feasible_samples/grids, n_objs)
if self.allow_cache:
self._cache[hash_var] = objs_tensor
return objs_tensor
def function(self, *args: Any, **kwargs: Any) -> th.Tensor:
"""Sum of weighted normalized objectives"""
objs_tensor = self._function(*args, **kwargs)
if self.normalize:
objs_tensor = self._normalize_objective(objs_tensor)
return th.sum(objs_tensor * th.tensor(self.ws, device=self.device), dim=1)
def _normalize_objective(self, objs_array: th.Tensor) -> th.Tensor:
"""Normalize objective values to [0, 1]
Parameters
----------
objs_array : th.Tensor
shape (n_feasible_samples/grids, n_objs)
Returns
-------
th.Tensor
shape (n_feasible_samples/grids, n_objs)
Raises
------
NoSolutionError
if lower bounds of objective values are
higher than their upper bounds
"""
objs_min, objs_max = th.min(objs_array, 0).values, th.max(objs_array, 0).values
if th.any((objs_min - objs_max) > 0):
raise NoSolutionError(
"Cannot do normalization! Lower bounds of "
"objective values are higher than their upper bounds."
)
elif th.all((objs_min - objs_max) == 0):
return th.zeros_like(objs_array)
return (objs_array - objs_min) / (objs_max - objs_min)
def to(self, device: Optional[th.device] = None) -> "WeightedSumObjective":
"""Move objective to device"""
if device is None:
device = get_default_device()
self.device = device
for objective in self.problem.objectives:
objective.to(device)
for constraint in self.problem.constraints:
constraint.to(device)
self._cache = {k: v.to(device) for k, v in self._cache.items()}
return self
class WeightedSum(MOSolver):
"""
Weighted Sum (WS) algorithm for MOO
Parameters
----------
ws_pairs: np.ndarray,
weight settings for all objectives, of shape (n_weights, n_objs)
inner_solver: BaseSolver,
the solver used in Weighted Sum
objectives: List[Objective],
objective functions
constraints: List[Constraint],
constraint functions
"""
@dataclass
class Params:
ws_pairs: np.ndarray
"""weight sets for all objectives, of shape (n_weights, n_objs)"""
so_solver: SOSolver
"""solver for SOO"""
normalize: bool = True
"""whether to normalize objective values to [0, 1] before applying WS"""
allow_cache: bool = False
"""whether to cache the objective values"""
device: Optional[th.device] = field(default_factory=get_default_device)
"""device on which to perform torch operations, by default available device."""
def __init__(
self,
params: Params,
):
super().__init__()
self.so_solver = params.so_solver
self.ws_pairs = params.ws_pairs
self.allow_cache = params.allow_cache
self.normalize = params.normalize
self.device = params.device
| if self.allow_cache and isinstance(params.so_solver, MOGD): | 2 | 2023-12-20 09:10:42+00:00 | 12k |
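The WeightedSumObjective in the record above scalarizes several objectives by direction-adjusting them, min-max normalizing each objective column to [0, 1], and summing against a weight vector. A minimal numeric sketch of that scalarization, assuming only torch is available (the tensor values are made up for illustration):

import torch as th

objs = th.tensor([[1.0, 10.0],
                  [2.0, 30.0],
                  [3.0, 20.0]])   # (n_samples, n_objs), already multiplied by each objective's direction
ws = th.tensor([0.5, 0.5])        # one weight per objective

objs_min = th.min(objs, 0).values
objs_max = th.max(objs, 0).values
normed = (objs - objs_min) / (objs_max - objs_min)  # per-objective scaling to [0, 1]
scores = th.sum(normed * ws, dim=1)                 # weighted-sum scalarization, shape (n_samples,)
# The sample with the smallest score is the one the single-objective solver would prefer.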
XLearning-SCU/2023-TPAMI-SMILE | Net.py | [
{
"identifier": "get_dist_release",
"path": "DistComput.py",
"snippet": "def get_dist_release(loader, dist_path):\r\n if not os.path.exists(dist_path):\r\n # loader = test_loader\r\n num_data = [10]\r\n with torch.no_grad():\r\n dist_list = [[] for i in range(len(num_data))]\r\n for j, data_t in enumerate(loader, 0):\r\n # get all inputs\r\n fea0, fea1, class_labels0, class_labels1, mask, is_pair, idx = data_t\r\n inputs_t = fea0.cuda()\r\n # inputs_t = torch.cat([fea0,fea1]).cuda()\r\n # labels_t = torch.cat([class_labels0,class_labels1]).cuda()\r\n # inputs_t, _, labels_t, _ = data_t\r\n # inputs_t, labels_t = inputs_t.cuda(), labels_t.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n inputs_t = fea1.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n for ii in range(len(num_data)):\r\n DirectoryOperator(dist_path).make_fold()\r\n np.savetxt(dist_path, np.array(dist_list[ii]))\r\n\r\n dist = torch.from_numpy(\r\n np.loadtxt(\r\n dist_path\r\n ).astype(np.float32)\r\n )\r\n return dist\r"
},
{
"identifier": "get_nearest_k",
"path": "_Utils/Calculator.py",
"snippet": "def get_nearest_k(h0, h1, k=1, sp_size=1000):\r\n hh0 = h0.half()\r\n hh1 = h1.half()\r\n split = int(np.ceil(len(hh0) / sp_size))\r\n near = []\r\n for i in range(split):\r\n dist = torch.cdist(hh0[i * sp_size:(i + 1) * sp_size], hh1)\r\n nearest = torch.argsort(dist, dim=1)[:, :k]\r\n near.append(nearest)\r\n nearest = torch.cat(near)\r\n return nearest\r"
},
{
"identifier": "update_log",
"path": "_Utils/Logs.py",
"snippet": "def update_log(dic, path='../log/res.csv'):\r\n index = 'Epoch'\r\n val = []\r\n name = []\r\n for na, v in dic.items():\r\n val.append(v)\r\n name.append(na)\r\n dt = pd.DataFrame([val], columns=name)\r\n dt = dt.set_index(index)\r\n if os.path.exists(path):\r\n dt_old = pd.read_csv(path, index_col=index)\r\n dt = merge_csv(dt_old, dt)\r\n DirectoryOperator(path).make_fold()\r\n dt.to_csv(path)\r"
},
{
"identifier": "visualize2",
"path": "_Utils/Scatter.py",
"snippet": "def visualize2(feature_vec, type_vec, group_vec, pred_vec, prefix, ):\r\n fv = feature_vec.reshape((len(feature_vec), -1))\r\n for perplexity in []:# 50\r\n vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(\r\n np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)\r\n )\r\n for s in [5]:\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea_multi,\r\n fig_path='{}Multi.svg'.format(prefix2),\r\n label_color=type_vec[group_vec == 0],\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n\r\n for perplexity in [50]:\r\n vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)\r\n for s in [5]: # 5\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.svg'.format(prefix2),\r\n label_color=type_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n # visualize_scatter(vis_fea,\r\n # fig_path='{}Cluster.svg'.format(prefix),\r\n # label_color=pred_vec,\r\n # label_shape=type_vec,\r\n #\r\n # )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.svg'.format(prefix2),\r\n label_color=group_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r"
},
{
"identifier": "visualize",
"path": "_Utils/Visualize.py",
"snippet": "def visualize(feature_vec, type_vec, group_vec, pred_vec, prefix='../Visualization/E{:03d}'.format(0)):\r\n vis_fea = tsne(feature_vec)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.jpg'.format(prefix),\r\n label_color=type_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Cluster.jpg'.format(prefix),\r\n label_color=pred_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.jpg'.format(prefix),\r\n label_color=group_vec,\r\n label_shape=type_vec,\r\n )\r"
},
{
"identifier": "visual_matrix_console",
"path": "_Utils/Visualize.py",
"snippet": "def visual_matrix_console(x):\r\n if len(x.shape) <= 2:\r\n x = x.reshape((*x.shape, 1))\r\n base_wid = int(np.log10(np.max(x) + 0.5)) + 1\r\n head_wid = x.shape[2] * (1 + base_wid)\r\n head_sep = int(head_wid // 2) + 1\r\n print('t\\\\c ', end='')\r\n for i in range(x.shape[1]):\r\n print(('{:' + '{}'.format(head_sep) + 'd}').format(i), end=' ' * (head_wid - head_sep))\r\n print()\r\n for i, line in enumerate(x):\r\n print('{:2d}: '.format(i), end='')\r\n for cl in line:\r\n sg = True\r\n for g in cl:\r\n if sg:\r\n sg = False\r\n else:\r\n print(' ', end='')\r\n if g != 0:\r\n # print('base_wid == {}'.format(base_wid))\r\n # print('g == {}'.format(g))\r\n print(('{:' + str(base_wid) + 'd}').format(g), end='')\r\n else:\r\n print(' ' * base_wid, end='')\r\n print('|', end='')\r\n print()\r"
},
{
"identifier": "visualize_image",
"path": "_Utils/Visualize.py",
"snippet": "def visualize_image(x, verbose=0, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param show:\r\n :param fig_path:\r\n :param x:\r\n (row, line, pic_h, pic_w) or (row, line, pic_h, pic_w, pic_c), pic_c = 1,3,4\r\n :return:\r\n \"\"\"\r\n x = np.asarray(x)\r\n if verbose:\r\n print('img.min() == {}'.format(np.min(x)))\r\n print('img.max() == {}'.format(np.max(x)))\r\n x -= np.min(x)\r\n x /= np.max(x)\r\n row, line = x.shape[:2]\r\n w, h = x.shape[1] * x.shape[3] / 90, x.shape[0] * x.shape[2] / 90\r\n plt.figure(figsize=(w, h)) # w, h\r\n count = 0\r\n for rx in x:\r\n for image in rx:\r\n count += 1\r\n plt.subplot(row, line, count)\r\n plt.imshow(image, cmap='gray', )\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n plt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0.1 / h, wspace=0.1 / w)\r\n\r\n if not show and fig_path is None:\r\n fig_path = '../_fig/fig.jpg'\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r"
},
{
"identifier": "plot_heat_map",
"path": "_Utils/Visualize.py",
"snippet": "def plot_heat_map(z, xticks=None, yticks=None, xlabel=None, ylabel=None, title=None, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param z: z[i,j] shown in i-th row, j-th line\r\n :param xlabel:\r\n :param ylabel:\r\n :param show:\r\n :param fig_path:\r\n :return:\r\n \"\"\"\r\n left = 0.15\r\n right = 1\r\n top = 0.95\r\n bottom = 0.15\r\n w, h = z.shape\r\n plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n\r\n # plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n # plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)\r\n\r\n if xticks is not None:\r\n plt.xticks(np.arange(len(xticks)), np.round(xticks, 2), rotation=45)\r\n if yticks is not None:\r\n plt.yticks(np.arange(len(yticks)), np.round(yticks, 2))\r\n for i in range(z.shape[0]):\r\n for j in range(z.shape[1]):\r\n # plt.text(j, i, accs[i, j].round(2), ha=\"center\", va=\"center\", color=\"b\", fontsize=12,\r\n # fontname='Times New Roman')\r\n plt.text(j, i, z[i, j], ha=\"center\", va=\"center\")\r\n\r\n if xlabel is not None:\r\n plt.xlabel(xlabel)\r\n if ylabel is not None:\r\n plt.ylabel(ylabel)\r\n if title is not None:\r\n plt.title(title)\r\n plt.imshow(z, interpolation='nearest', aspect='auto')\r\n\r\n plt.colorbar()\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r"
},
{
"identifier": "TimeOperator",
"path": "_Utils/TimeOperator.py",
"snippet": "class TimeOperator:\r\n def __init__(self):\r\n self.time_buffer = None\r\n self.time_record = 0\r\n self.time_sum = 0\r\n self.time_count = 0\r\n\r\n def time(self, output=False, promt=''):\r\n if self.time_buffer is None:\r\n self.time_buffer = time()\r\n else:\r\n self.time_record = time() - self.time_buffer\r\n self.time_buffer = None\r\n self.time_sum += self.time_record\r\n self.time_count += 1\r\n if output:\r\n print('{}Time == {:7.05f}'.format(promt, self.time_record))\r\n\r\n def get_time_sum(self):\r\n return self.time_sum\r\n\r\n def show_time_sum(self):\r\n print('{:.02f}'.format(self.get_time_sum()))\r\n\r\n def get_fps(self):\r\n return self.time_count / self.time_sum\r\n\r\n def __get_speed(self, to_metric=None):\r\n speed = self.get_fps()\r\n metric = 'Second'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Minute'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Hour'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 24\r\n metric = 'Day'\r\n return speed, metric\r\n\r\n def show_process(self, process_now, process_total, name='Epoch'):\r\n if self.time_sum <= 0:\r\n return\r\n speed = self.time_sum / self.time_count\r\n print('{:<5s} [{:3.0f}/{:3.0f}] [{:8.02f}/{:8.02f}]: {:5.02f}({:5.02f}) '.format(\r\n name, process_now, process_total,\r\n process_now * speed, process_total * speed,\r\n self.time_record, speed\r\n ))\r\n\r\n def show_speed(self):\r\n speed, metric = self.__get_speed()\r\n print('{:4.01f} Frames/{}'.format(speed, metric))\r"
},
{
"identifier": "DirectoryOperator",
"path": "_Utils/DirectoryOperator.py",
"snippet": "class DirectoryOperator:\r\n def __init__(self, directory: str):\r\n self.directory = directory\r\n\r\n def make_fold(self):\r\n if not TestMode:\r\n # print('mk dir {}'.format(os.path.dirname(self.directory)))\r\n os.makedirs(os.path.dirname(self.directory), exist_ok=True)\r\n\r\n def modification_time(self):\r\n if os.path.exists(self.directory):\r\n return os.path.getmtime(self.directory)\r\n else:\r\n warnings.warn('Time_now is returned since the modification time for non-exist file is not available. File: {}'.format(self.directory))\r\n return time.time()\r"
},
{
"identifier": "get_clusters",
"path": "DataSetMaster/dataset.py",
"snippet": "def get_clusters(args):\n item_path = os.path.join(path_operator.get_checkpoint_path(level=1), 'Items0321')\n file_mnist_test = os.path.join(item_path, 'mnist_test_clusters89.67.txt')\n file_mnist_train = os.path.join(item_path, 'MnistTrain94.31B256.txt')\n file_amazon = os.path.join(item_path, 'amazon72.81B032ReValue.txt')\n file_webcam = os.path.join(item_path, 'webcamOurLoaderRevalveBatchWiseB032_84.03.txt')\n file_usps = os.path.join(item_path, 'usps_train_clusters85.10.txt')\n root_har = os.path.join(item_path, 'HAR')\n root_mtfl = os.path.join(item_path, 'MTFL')\n\n if args.dataset == 'MNISTUSPS': # 87.75 93.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_usps,\n ]\n elif args.dataset == 'ReverseMNIST': # 89.67 94.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_mnist,\n ]\n elif args.dataset == 'Office': # 75.28\n file_list = [\n file_amazon,\n file_webcam,\n ]\n elif args.dataset == 'MTFL':\n file_list = np.sort([os.path.join(root_mtfl, f) for f in os.listdir(root_mtfl) if f.endswith('txt')])\n elif args.dataset == 'HAR': # 81.70\n file_list = np.sort([os.path.join(root_har, f) for f in os.listdir(root_har) if f.endswith('txt')])\n else:\n raise NotImplementedError(\"\")\n\n def debug(x):\n print(x.shape)\n return x\n\n clusters = torch.cat(\n [debug(torch.from_numpy(np.loadtxt(c).astype(np.float32)).long()) for c in file_list],\n dim=0,\n ).cuda()\n return clusters"
},
{
"identifier": "svm_classify",
"path": "classification.py",
"snippet": "def svm_classify(data, data_gt, label, test_prop, C):\n \"\"\"\n trains a linear SVM on the data\n input C specifies the penalty factor of SVM\n \"\"\"\n seed = random.randint(0, 1000)\n train_idx, test_idx = TT_split(data.shape[1], test_prop, seed)\n train_data = np.concatenate([data[0][train_idx], data[1][train_idx]], axis=1)\n test_data = np.concatenate([data_gt[0][test_idx], data_gt[1][test_idx]], axis=1)\n test_label = label[test_idx]\n train_label = label[train_idx]\n\n # print('training SVM...')\n clf = svm.LinearSVC(C=C, dual=False)\n clf.fit(train_data, train_label.ravel())\n\n p = clf.predict(test_data)\n test_acc = accuracy_score(test_label, p)\n\n return test_acc"
},
{
"identifier": "UMAP",
"path": "evaluate.py",
"snippet": "def UMAP(feature_vec, type_vec, group_vec, pred_vec, n_type, n_batch, args, epoch, dst_root='../Visualization'):\n t = time.time()\n # print(\"Performing UMAP Visualization...\")\n # print('feature_vec.shape == {}'.format(feature_vec.shape))\n sc.set_figure_params(figsize=(4, 4), dpi=300)\n\n # type_vec = pd.DataFrame(type_vec)\n # for key in cell_type_dict.keys():\n # type_vec.replace(key, cell_type_dict[key], inplace=True)\n # group_vec = pd.DataFrame(group_vec)\n # for key in batch_dict.keys():\n # batch_vec.replace(key, batch_dict[key], inplace=True)\n\n adata = sc.AnnData(feature_vec)\n # print('adata.shape == {}'.format(adata.shape))\n sc.pp.neighbors(adata)\n adata.obs['cluster'] = pd.DataFrame(pred_vec).values.astype(np.str_)\n adata.obs['type'] = pd.DataFrame(type_vec).values.astype(np.str_)\n adata.obs['group'] = pd.DataFrame(group_vec).values.astype(np.str_)\n\n sc.tl.umap(adata)\n sc.pl.umap(adata,\n color=['cluster'],\n palette=sns.color_palette(\"husl\", n_type),\n save='E{:03d}UmapCluster{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['type'],\n palette=sns.color_palette(\"husl\", n_type),\n save='E{:03d}UmapType{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['group'],\n palette=sns.color_palette(\"hls\", n_batch),\n save='E{:03d}UmapGroup{}.png'.format(epoch, str(args.dataset)),\n show=False)\n roott = './figures/'\n for root, dirs, files in os.walk(roott):\n # print(root)\n # print(dirs)\n # print(files)\n for f in files:\n # print(os.path.join('../Visualization', f))\n FileOperator(\n os.path.join(root, f)\n ).rename(\n os.path.join(dst_root, f.replace('umapE', 'E')),\n auto_rename=False\n )\n if PrintTimer:\n print('VisualizeScatter finished with in {:.03f} seconds (x.shape == {}).'.format(\n time.time() - t,\n feature_vec.shape,\n ))"
},
{
"identifier": "evaluate2",
"path": "evaluate.py",
"snippet": "def evaluate2(feature_vec, pred_vec, type_vec, group_vec):\n nmi, ari, acc, pred_adjusted = cluster_metrics(type_vec, pred_vec)\n gs = np.unique(group_vec)\n ts = np.unique(type_vec)\n class_num = len(ts)\n group_num = len(gs)\n if group_vec is not None and group_num > 1:\n balance, entro = my_balance(pred_vec, group_vec, cluster_num=np.unique(type_vec).shape[0],\n group_num=np.unique(group_vec).shape[0])\n O = torch.zeros((class_num, group_num)).cuda()\n\n for b in gs:\n ind_g = b == group_vec\n pred_vec_g = pred_vec[ind_g]\n for t in ts:\n O[t, b] = np.sum(pred_vec_g == t)\n O += 1e-6\n O = (O / torch.sum(O))\n NmiFair = normalized_mutual_information(O).cpu().numpy()\n Fmeasure = FMeasure(beta=1)(acc, NmiFair)\n else:\n balance, entro = 0, 0\n NmiFair = 0\n Fmeasure = 0\n entro_v = np.mean(entro)\n global BestAcc, BestAri, BestNmi, BestBalance, BestEntropy, BestFairness, BestNmiFair, BestFmeasure\n if BestAcc < acc:\n BestAcc = acc\n if BestAri < ari:\n BestAri = ari\n if BestNmi < nmi:\n BestNmi = nmi\n if BestBalance < balance:\n BestBalance = balance\n # if BestFairness < fairness:\n # BestFairness = fairness\n if BestNmiFair < NmiFair:\n BestNmiFair = NmiFair\n if BestFmeasure < Fmeasure:\n BestFmeasure = Fmeasure\n if BestEntropy < entro_v:\n BestEntropy = entro_v\n\n print(\n 'NMI={:5.02f}|{:5.02f}, ARI={:5.02f}|{:5.02f}, ACC={:5.02f}|{:5.02f}, Balance={:5.02f}|{:5.02f}, NmiFair={:5.02f}|{:5.02f}, Fmeasure={:5.02f}|{:5.02f}, Entropy={:5.02f}|{:5.02f}[{}],'.format(\n nmi * 100, BestNmi * 100,\n ari * 100, BestAri * 100,\n acc * 100, BestAcc * 100,\n balance * 100, BestBalance * 100,\n # fairness * 100, BestFairness * 100,\n NmiFair * 100, BestNmiFair * 100,\n Fmeasure * 100, BestFmeasure * 100,\n entro_v, BestEntropy, entro\n )\n )\n met = {\n 'nmi' : nmi,\n 'ari' : ari,\n 'acc' : acc,\n 'balance' : balance,\n 'NmiFair' : NmiFair,\n 'Fmeasure': Fmeasure,\n }\n return pred_adjusted, met\n # tqdm.write('NMI=%.4f, ACC=%.4f, ARI=%.4f' % (nmi, acc, ari), end='')\n # if fair_metric:\n # kl, ari_b = fair_metrics(feature_vec, group_vec, pred_vec, type_vec)\n # print(', KL=%.4f, ARI_b=%.4f' % (kl, ari_b), end='')\n # tqdm.write('')"
},
{
"identifier": "visual_image_scatter",
"path": "figures/ScatterMaster.py",
"snippet": "def visual_image_scatter():\r\n np_path = os.path.join(\r\n 'D:/VirtualMachine/Codes/230904/SMAIL_RunSet_Visual/ --QuickConfig C100 --VisualFreq 5 --VisualRandom 1 --dataset NoisyMNIST30000 --seed 1999 --train_epoch 100/Checkpoints/Epoch099.npz')\r\n # np_path_row = os.path.join(root, np_paths[np_names.index(np_tag)], 'NpPoints', np_epoch)\r\n\r\n data = np.load(np_path, allow_pickle=False)\r\n data_vec = data['data_vec']\r\n feature_vec = data['feature_vec']\r\n group_vec = data['group_vec']\r\n type_vec = data['type_vec']\r\n\r\n # visualize_image(x=[\r\n # [it.reshape([28, 28]) for it in data_vec[:10]],\r\n # [it.reshape([28, 28]) for it in data_vec[10:20]],\r\n # [it.reshape([28, 28]) for it in data_vec[20:30]],\r\n # ], show=True)\r\n\r\n DrawMax = 3000\r\n if len(feature_vec) > DrawMax:\r\n it = np.arange(len(feature_vec))\r\n np.random.shuffle(it)\r\n ind = it[:DrawMax]\r\n feature_vec = feature_vec[ind]\r\n type_vec = type_vec[ind]\r\n group_vec = group_vec[ind]\r\n data_vec = data_vec[ind]\r\n vis_fea = TSNE(perplexity=50).fit_transform(feature_vec)\r\n\r\n _, ax = plt.subplots(figsize=(5 * 1 * 2, 5 * 1 * 2 / 1.6))\r\n\r\n label_color = np.unique(type_vec)\r\n color_num = len(np.unique(type_vec))\r\n # if color_num <= 2:\r\n # cmap = None\r\n if color_num <= 10:\r\n cmap = 'tab10'\r\n elif color_num <= 20:\r\n cmap = 'tab20'\r\n else:\r\n cmap = 'gist_ncar'\r\n for digit in np.unique(type_vec):\r\n ax.scatter(\r\n *vis_fea[type_vec == digit].T,\r\n # marker=f\"${digit}$\",\r\n s=0.5,\r\n # color=plt.cm.Dark2(digit),\r\n alpha=0.7,\r\n c=type_vec[type_vec == digit],\r\n cmap=cmap,\r\n vmax=max(4, np.max(label_color)),\r\n vmin=min(0, np.min(label_color)),\r\n zorder=2,\r\n )\r\n w = int(np.sqrt(len(data_vec[0])))\r\n h = w\r\n shown_images = np.array([[1.0, 1.0]]) # just something big\r\n for i in range(data_vec.shape[0]):\r\n # plot every digit on the embedding\r\n # show an annotation box for a group of digits\r\n dist = np.sum((vis_fea[i] - shown_images) ** 2, 1)\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n shown_images = np.concatenate([shown_images, [vis_fea[i]]], axis=0)\r\n # img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, )\r\n img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, zoom=0.5)\r\n # img.ti\r\n imagebox = offsetbox.AnnotationBbox(\r\n img, # [w, h, 3]\r\n vis_fea[i],\r\n pad=0,\r\n frameon=False\r\n )\r\n imagebox.set(zorder=1)\r\n ax.add_artist(imagebox)\r\n\r\n ax.set_title('title')\r\n ax.axis(\"off\")\r\n plt.tight_layout()\r\n plt.savefig('D:/Pengxin/Temp/tmp.pdf')\r\n plt.show()\r\n\r\n print()\r\n pass\r"
}
] | import math
import os
import time
import warnings
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
import evaluate
import faiss
import scipy.io as sio
from torch import nn
from torch.autograd import Variable
from DistComput import get_dist_release
from _Utils.Calculator import get_nearest_k
from _Utils.Logs import update_log
from _Utils.Scatter import visualize2
from _Utils.Visualize import visualize, visual_matrix_console, visualize_image, plot_heat_map
from _Utils import TimeOperator, DirectoryOperator
from DataSetMaster.dataset import get_clusters
from classification import svm_classify
from evaluate import UMAP, evaluate2
from sklearn import metrics
from munkres import Munkres
from figures.ScatterMaster import visual_image_scatter
| 10,313 | # type_vec = type_vec[ind]
# visualize2(feature_vec=feature_vec, type_vec=type_vec, group_vec=group_vec,
# pred_vec=None,
# prefix=os.path.join('../', 'Visualization/E{:03d}N{:04d}'.format(epoch, len(type_vec))))
# visual_image_scatter(
# data_vec,
# feature_vec,
# group_vec,
# type_vec,
# )
raw_dataset = torchvision.datasets.ImageFolder(
'D:/VirtualMachine/Data/caltech-101/101_ObjectCategories',
transform=torchvision.transforms.Resize([256, 256])
)
# mat = sio.loadmat('D:/VirtualMachine/Data/Caltech101-all.mat')
# data = mat['X'][0][3:5]
# label = np.squeeze(mat['Y'])-1
raw_data_ind = np.ones(len(data_vec), dtype=int) * -1
class_num = len(np.unique(type_vec))
class_num_s = len(np.unique(raw_dataset.targets))
raw_dataset.targets = np.asarray(raw_dataset.targets) - 1
for t in range(class_num_s):
print('{: 4d} {: 4d} {: 4d} {: 4d}'.format(
t,
np.sum(t == type_vec),
np.sum(t == raw_dataset.targets),
np.sum(t == 0),
))
for t in np.unique(type_vec):
bank_inds = np.arange(len(raw_dataset.targets))[raw_dataset.targets == t]
raw_data_ind[t == type_vec] = np.concatenate([bank_inds, bank_inds])
# raw_data = raw_dataset[np.asarray(raw_data_ind, dtype=int)]
raw_data = np.asarray([np.asarray(raw_dataset[it][0]) for it in raw_data_ind])
np.savez(
os.path.join(args.resume.replace('.checkpoint', 'Raw2.npz')),
data_vec=raw_data,
feature_vec=feature_vec,
group_vec=group_vec,
type_vec=type_vec,
pred_adjusted=pred_adjusted,
)
return
if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0:
met_mul2 = {}
if args.EvalMulti:
print('EvalMulti')
multi_modality_feature = np.concatenate(
[feature_vec[group_vec == view] for view in np.unique(group_vec)],
axis=1)
_, met_mul, _ = cluster_and_measure(
features=multi_modality_feature, types=type_vec[group_vec == 0],
groups=group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['Multi-' + nv] = v
if args.EvalMean:
print('EvalMean')
_, met_mul, _ = cluster_and_measure(
features=np.mean(
np.asarray([feature_vec[group_vec == view] for view in np.unique(group_vec)]),
axis=0
), types=type_vec[group_vec == 0], groups=group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['Mean-' + nv] = v
if args.EvalSingel0:
print('EvalSingel0')
_, met_mul, _ = cluster_and_measure(features=feature_vec_cluster[group_vec_cluster == 0],
types=type_vec_cluster[group_vec_cluster == 0],
groups=group_vec_cluster[group_vec_cluster == 0])
for nv, v in met_mul.items():
met_mul2['Singel0-' + nv] = v
if args.EvalSingel1:
print('EvalSingel1')
_, met_mul, _ = cluster_and_measure(features=feature_vec_cluster[group_vec_cluster == 1],
types=type_vec_cluster[group_vec_cluster == 1],
groups=group_vec_cluster[group_vec_cluster == 1] - 1)
for nv, v in met_mul.items():
met_mul2['Singel1-' + nv] = v
if args.EvalOriMean:
print('EvalOriMean')
mean_fea = np.mean(
np.asarray([feature_vec[group_vec == view] for view in np.unique(group_vec)]),
axis=0
)
score = self.soft_ass(mean_fea, centroids.cpu().numpy())
pred_vec = np.argmax(score, axis=1)
_, met_mul = evaluate2(None, pred_vec, type_vec[group_vec == 0], group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['OriMean-' + nv] = v
if args.EvalOriScoreMean:
print('EvalOriScoreMean')
score = self.soft_ass(torch.from_numpy(feature_vec).cuda(), centroids).cpu().numpy()
pred_vec = np.argmax(np.mean(
np.asarray([score[group_vec == view] for view in np.unique(group_vec)]),
axis=0
), axis=1)
_, met_mul = evaluate2(None, pred_vec, type_vec[group_vec == 0], group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['OriScoreMean-' + nv] = v
if args.EvalOriPredMean:
print('EvalOriPredMean')
pred = torch.softmax(self.soft_ass(torch.from_numpy(feature_vec).cuda(), centroids) / 0.2,
dim=1).cpu().numpy()
pred_vec = np.argmax(np.mean(
np.asarray([pred[group_vec == view] for view in np.unique(group_vec)]),
axis=0
), axis=1)
_, met_mul = evaluate2(None, pred_vec, type_vec[group_vec == 0], group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['EvalOriPredMean-' + nv] = v
if args.EvalCla:
mv_f = np.asarray([feature_vec[group_vec == view] for view in np.unique(group_vec)])
mv_gt = np.asarray([feature_vec_classification[group_vec == view] for view in np.unique(group_vec)])
for test_prop in [0.2, 0.5, 0.8]:
met_mul2['ClassificationACC{:.01f}'.format(test_prop)] = np.mean(
|
def show_distribution_ct(type_vec, group_vec, pred_vec, class_num, group_num):
v = np.zeros((class_num, class_num, group_num), dtype=int)
for t, c, g in zip(type_vec, pred_vec, group_vec):
v[t, c, g] += 1
visual_matrix_console(x=v)
def kmeans(feature_vec, class_num):
d = feature_vec.shape[1]
kmeans = faiss.Clustering(d, class_num)
kmeans.verbose = False
kmeans.niter = 300
kmeans.nredo = 10
# kmeans.spherical = True
# if LimitKmeans:
# kmeans.max_points_per_centroid = 1000
# kmeans.min_points_per_centroid = 10
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = True
cfg.device = 0
index = faiss.GpuIndexFlatL2(res, d, cfg)
# print(feature_vec.shape)
kmeans.train(feature_vec, index)
centroids = faiss.vector_to_array(kmeans.centroids).reshape(class_num, d)
return centroids
def show_distribution(cluster_vec, group_vec, class_num, group_num):
for it in np.arange(group_num):
print('{:4d}, '.format(it), end='')
print('')
cluster_group = torch.zeros((class_num, group_num), dtype=torch.int)
for i, j in zip(cluster_vec, group_vec):
cluster_group[i, j] += 1
# cluster_group = cluster_group[torch.argsort(torch.sum(cluster_group, dim=1))]
for line in cluster_group:
print('{:4d}: '.format(torch.sum(line)), end='')
for it in line:
print('{:4d}, '.format(it), end='')
print('')
def save_checkpoint(state, epoch):
"""
it has been trained for *epoch* epochs
"""
filename = 'Epoch{:03d}.checkpoint'.format(epoch)
checkpoint_dir = os.path.join(
os.path.dirname(os.getcwd()),
'Checkpoints',
filename
)
DirectoryOperator.FoldOperator(directory=checkpoint_dir).make_fold()
if os.path.exists(checkpoint_dir):
warnings.warn('Checkpoint exist and been replaced.({})'.format(checkpoint_dir))
print('Save check point into {}'.format(checkpoint_dir))
torch.save(state, checkpoint_dir)
def get_ffn(dims, last_layers=None, with_bn=False, drop_out=0):
layers = []
for ind in range(len(dims) - 1):
in_dim = dims[ind]
out_dim = dims[ind + 1]
layers.append(nn.Linear(in_dim, out_dim))
if with_bn:
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
if drop_out:
layers.append(nn.Dropout(drop_out))
if last_layers is not None:
layers.extend(last_layers)
return nn.Sequential(*layers)
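# get_ffn assembles a plain MLP: one nn.Linear per consecutive pair in dims, each optionally
# followed by BatchNorm1d (with_bn), always by ReLU, optionally by Dropout, with any
# last_layers appended verbatim at the end.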
def get_cov(dims, strides, last_layers=None, with_bn=False, drop_out=0):
layers = []
for ind in range(len(dims) - 1):
in_dim = dims[ind]
out_dim = dims[ind + 1]
stride = strides[ind]
# layers.append(nn.Linear(in_dim, out_dim))
if stride >= 0:
layers.append(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1))
else:
layers.append(nn.ConvTranspose2d(
in_dim, out_dim, kernel_size=3, stride=-stride, padding=1, output_padding=0 if stride == -1 else 1))
if with_bn:
# layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.BatchNorm2d(out_dim))
layers.append(nn.ReLU())
if drop_out:
layers.append(nn.Dropout(drop_out))
if last_layers is not None:
layers.extend(last_layers)
return nn.Sequential(*layers)
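# get_cov builds the convolutional counterpart: a non-negative stride adds a 3x3 Conv2d,
# while a negative stride s adds a ConvTranspose2d with stride -s for upsampling
# (output_padding=1 unless s == -1).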
class Net(nn.Module):
def __init__(self, args, in_dims, class_num, group_num):
super(Net, self).__init__()
self.encoder_adaption = nn.ModuleList([
get_ffn([in_dims[i], 1024], with_bn=args.BatchNormType[0] == '1', drop_out=args.Dropout)
for i in range(group_num if args.GroupWiseLayer[0] == '1' else 1)])
self.encoder = nn.ModuleList([
get_ffn([1024, 1024, 512], with_bn=args.BatchNormType[1] == '1', drop_out=args.Dropout)
for _ in range(group_num if args.GroupWiseLayer[1] == '1' else 1)])
if args.representation_dim == 0:
args.representation_dim = class_num
self.class_num = class_num
self.group_num = group_num
self.pred_cac = None
self.pred_center_cac = None
if args.ElActivationType == 'None':
el_activation_ = []
elif args.ElActivationType == 'Normalize':
el_activation_ = []
elif args.ElActivationType == 'BnNormalize':
el_activation_ = [nn.BatchNorm1d(args.representation_dim)]
elif args.ElActivationType == 'BnReNormalize':
el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()]
elif args.ElActivationType == 'BnRe':
el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()]
else:
raise NotImplementedError('')
self.el_activation_ = el_activation_
self.encoder_linear = nn.ModuleList([
get_ffn([512, 256], with_bn=args.BatchNormType[2] == '1', drop_out=args.Dropout,
last_layers=[nn.Linear(256, args.representation_dim)] + self.el_activation_)
for _ in range(group_num if args.GroupWiseLayer[2] == '1' else 1)])
dec_in = args.representation_dim
if args.McDecoder:
dec_in *= group_num
self.dec_in = dec_in
self.decoder_linear = nn.ModuleList([
get_ffn([self.dec_in, 256, 512], with_bn=args.BatchNormType[3] == '1', drop_out=args.Dropout)
for _ in range(group_num if args.GroupWiseLayer[3] == '1' else 1)])
if args.ActivationType == 'None':
final_activation_ = []
elif args.ActivationType == 'Sigmoid':
final_activation_ = [nn.Sigmoid()]
elif args.ActivationType == 'Tanh':
final_activation_ = [nn.Tanh()]
else:
raise NotImplementedError('')
self.final_activation_ = final_activation_
self.decoder = nn.ModuleList([
get_ffn([512, 1024, 1024], with_bn=args.BatchNormType[4] == '1', drop_out=args.Dropout)
for _ in range(group_num if args.GroupWiseLayer[4] == '1' else 1)])
self.decoder_adaption = nn.ModuleList([
get_ffn([], last_layers=[nn.Linear(1024, in_dims[i])] + self.final_activation_)
for i in range(group_num if args.GroupWiseLayer[5] == '1' else 1)])
self.args = args
self.in_dims = in_dims
# def update_cluster_center(self, center):
# self.cluster_centers = F.normalize(torch.from_numpy(center), dim=1).cuda()
def forward(self, x, **kwargs):
return self.decode(self.encode([x]))
def encode(self, xs: list):
hs = []
for g, x in enumerate(xs):
if self.args.noise_type == 'None':
pass
elif self.args.noise_type == 'Drop':
x = x * (Variable(x.data.new(x.size()).normal_(0, 0.1)) < self.args.noise_weight).type_as(x)
elif self.args.noise_type == 'Add':
x = x + Variable(x.data.new(x.size()).normal_(0, self.args.noise_weight)).type_as(x)
else:
raise NotImplementedError('')
if len(x) != 0:
if len(x) == 1:
x = torch.concat([x, x])
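                    # duplicating the lone sample (and slicing back to x[[0]] below) likely
                    # avoids BatchNorm1d errors when a view receives a batch of size 1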
# print(x.shape)
# x = x.view((len(x), -1))
# print(x.shape)
x = self.encoder_adaption[g if self.args.GroupWiseLayer[0] == '1' else 0](x)
x = self.encoder[g if self.args.GroupWiseLayer[1] == '1' else 0](x)
x = self.encoder_linear[g if self.args.GroupWiseLayer[2] == '1' else 0](x)
if len(x) == 1:
x = x[[0]]
if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']:
x = F.normalize(x, dim=1)
else:
x = torch.zeros([0, self.args.representation_dim], device=torch.device('cuda:0'))
hs.append(x)
return hs
def soft_ass(self, h, centroids):
if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']:
return h @ centroids.T
else:
dst = torch.cdist(h, centroids)
# return (torch.mean(dst) - dst) / (torch.amax(dst) - torch.amin(dst)) * 2
return -dst / 2
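    # soft_ass scores samples against centroids: cosine similarity when the embeddings are
    # L2-normalized ('Normalize'/'BnNormalize'/'BnReNormalize'), otherwise a negative scaled
    # Euclidean distance, so argmax always selects the nearest centroid.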
# def encode_class(self, hs):
# cs = []
# for h in hs:
# c = h @ self.cluster_centers.T
# cs.append(c)
# return cs
def decode(self, hs):
xs = []
for g, h in enumerate(hs):
if self.args.McDecoder:
h = torch.cat(hs, dim=1)
if len(h) != 0:
if len(h) == 1:
h = torch.concat([h, h])
h = self.decoder_linear[g if self.args.GroupWiseLayer[3] == '1' else 0](h)
h = self.decoder[g if self.args.GroupWiseLayer[4] == '1' else 0](h)
h = self.decoder_adaption[g if self.args.GroupWiseLayer[5] == '1' else 0](h)
if len(h) == 1:
h = h[[0]]
else:
h = torch.zeros([0, self.in_dims[g]], device=torch.device('cuda:0'))
xs.append(h)
return xs
def run(self, epochs, train_dataloader, test_dataloader, args):
# if args.loss_self_cons:
# clusters = get_clusters(args=args)
optimizer_g = torch.optim.Adam(
self.parameters(),
lr=args.LearnRate,
betas=(args.betas_a, args.betas_v),
weight_decay=args.WeightDecay
)
mse_loss = nn.MSELoss().cuda()
timer_all = TimeOperator.Timer()
timer_train = TimeOperator.Timer()
timer_save = TimeOperator.Timer()
ce_loss = nn.CrossEntropyLoss().cuda()
type_detail_shown = False
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# if args.gpu is None:
# checkpoint = torch.load(args.resume)
# else:
# # Map model to be loaded to specified single gpu.
# loc = 'cuda:{}'.format(args.gpu)
# checkpoint = torch.load(args.resume, map_location=loc)
start_epoch = checkpoint['epoch']
self.load_state_dict(checkpoint['state_dict'])
optimizer_g.load_state_dict(checkpoint['optimizer']['optimizer_g'])
# self.__dict__ = checkpoint['self_dic']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
# self.args = args
# warnings.warn('This is not equal to start from the beginning due to different rands states.')
#
else:
raise NotImplementedError("=> no checkpoint found at '{}'".format(args.resume))
if args.CodeTest:
args.train_epoch = start_epoch + 1
epochs = start_epoch + 1
best_acc = 0
for epoch in range(start_epoch, epochs):
if (epoch + 1) <= args.LearnRateWarm:
lr = args.LearnRate * (epoch + 1) / args.LearnRateWarm
else:
if args.LearnRateDecayType == 'None':
lr = args.LearnRate
elif args.LearnRateDecayType == 'Exp':
lr = args.LearnRate * ((1 + 10 * (epoch + 1 - args.LearnRateWarm) / (
args.train_epoch - args.LearnRateWarm)) ** -0.75)
elif args.LearnRateDecayType == 'Cosine':
lr = args.LearnRate * 0.5 * (1. + math.cos(
math.pi * (epoch + 1 - args.LearnRateWarm) / (args.train_epoch - args.LearnRateWarm)))
else:
raise NotImplementedError('args.LearnRateDecayType')
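            # after warm-up: 'Exp' anneals lr by (1 + 10 * p) ** -0.75 and 'Cosine' by
            # 0.5 * (1 + cos(pi * p)), where p is the fraction of post-warm-up epochs completed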
if lr != args.LearnRate:
def adjust_learning_rate(optimizer):
print('adjust_learning_rate: {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
adjust_learning_rate(optimizer_g)
timer_all_time = time.time()
# inf_t = time.time()
# print('start epoch {}'.format(epoch))
self.eval()
feature_vec, type_vec, group_vec = [], [], []
feature_vec_cluster = []
group_vec_cluster = []
feature_vec_classification = []
type_vec_cluster = []
data_vec = []
is_pair_all = []
timer_infer_data = TimeOperator.Timer()
rnmse_vec = [[], []] # mask = 0 1
with torch.no_grad():
inf_data_t = time.time()
for (fea0, fea1, class_labels0, class_labels1, mask, is_pair, index) in test_dataloader:
timer_infer_data.update(time.time() - inf_data_t)
# timer_infer_data.show(prefix='InferDataTime', total_count=len(test_dataloader),
# print_end_time=False)
fea0 = fea0.cuda()
fea1 = fea1.cuda()
if args.Rev:
h1, h0 = self.encode([fea0, fea1])
if args.SingleView != -1:
for v in range(len(mask[0])):
if v != 1 - args.SingleView:
mask[:, v] = 0
else:
h0, h1 = self.encode([fea0, fea1])
if args.SingleView != -1:
for v in range(len(mask[0])):
if v != args.SingleView:
mask[:, v] = 0
cluster_h0 = h0[mask[:, 0] == 1]
cluster_h1 = h1[mask[:, 1] == 1]
# if args.SingleView != -1:
# mask[:, args.SingleView] = 0
# # if args.SingleView == 0:
# # cluster_h1 = cluster_h1[[]]
# # class_labels1 = class_labels1[[]]
# # elif args.SingleView == 1:
# # class_labels0 = class_labels0[[]]
# # cluster_h0 = cluster_h0[[]]
# # else:
# # raise NotImplementedError('')
is_pair_all.extend(is_pair)
feature_vec_cluster.extend(torch.cat([cluster_h0, cluster_h1]).detach().cpu().numpy())
group_vec_cluster.extend(torch.concat((torch.zeros(len(cluster_h0), dtype=torch.int),
torch.ones(len(cluster_h1), dtype=torch.int))).numpy())
type_vec_cluster.extend(torch.concat((class_labels0[mask[:, 0] == 1],
class_labels1[mask[:, 1] == 1])).numpy())
feature_vec_classification.extend(torch.cat([h0, h1]).detach().cpu().numpy())
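                    # the *_cluster buffers keep only observed views (mask == 1), while
                    # feature_vec_classification stores the embeddings of every sample from both views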
if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0:
if torch.sum(torch.logical_not(torch.logical_or(mask[:, 1], mask[:, 0]))):
                            raise NotImplementedError('Found a pair with both modalities missing')
if args.reFill == 'Copy':
if torch.sum(mask[:, 0] == 0):
h0[mask[:, 0] == 0] = h1[mask[:, 0] == 0]
if torch.sum(mask[:, 1] == 0):
h1[mask[:, 1] == 0] = h0[mask[:, 1] == 0]
elif args.reFill == 'Center':
# raise NotImplementedError('')
if self.pred_center_cac is None:
                                warnings.warn('self.pred_center_cac is None')
else:
centors = torch.zeros((len(mask), 2, len(self.pred_center_cac[0]))).cuda()
centors[mask[:, 0] == 1, 0] = self.pred_center_cac[
self.pred_cac[:torch.sum(mask[:, 0] == 1)]]
centors[mask[:, 1] == 1, 1] = self.pred_center_cac[
self.pred_cac[torch.sum(mask[:, 0] == 1):]]
if torch.sum(mask[:, 0] == 0):
h0[mask[:, 0] == 0] = centors[mask[:, 0] == 0, 1]
if torch.sum(mask[:, 1] == 0):
h1[mask[:, 1] == 0] = centors[mask[:, 1] == 0, 0]
elif args.reFill == 'KnnMapMean':
if torch.sum(mask[:, 0] == 0):
nearest = get_nearest_k(h1[mask[:, 0] == 0], h1[is_pair], args.reAlignK)
h0p = h0[is_pair]
h1[mask[:, 0] == 0] = torch.cat([torch.mean(h0p[ns], dim=0) for ns in nearest])
if torch.sum(mask[:, 1] == 0):
nearest = get_nearest_k(h0[mask[:, 1] == 0], h0[is_pair], args.reAlignK)
h1p = h1[is_pair]
h1[mask[:, 1] == 0] = torch.cat([torch.mean(h1p[ns], dim=0) for ns in nearest])
# raise NotImplementedError('')
elif args.reFill == 'KnnMean':
                            # Correspondence re-alignment: keep xi1 fixed and replace xi2 with the mean of the k view-2 points nearest to xi1
if torch.sum(mask[:, 1] == 0):
hs0 = h0[mask[:, 1] == 0]
he1 = h1[mask[:, 1] == 1]
nearest = get_nearest_k(hs0, he1, args.reAlignK)
# nearest = torch.argsort(torch.cdist(hs0.cpu(), he1.cpu()), dim=1)[:, :args.reAlignK]
h1[mask[:, 1] == 0] = torch.cat([torch.mean(he1[ns], dim=0) for ns in nearest])
# class_labels1[mask[:, 1] == 0] = class_labels1[mask[:, 1] == 1][nearest[:, 0]]
if torch.sum(mask[:, 0] == 0):
hs1 = h1[mask[:, 0] == 0]
he0 = h0[mask[:, 0] == 1]
nearest = get_nearest_k(hs1, he0, args.reAlignK)
# nearest = torch.argsort(torch.cdist(hs1.cpu(), he0.cpu()), dim=1)[:, :args.reAlignK]
h0[mask[:, 0] == 0] = torch.cat([torch.mean(he0[ns], dim=0) for ns in nearest])
# class_labels0[mask[:, 0] == 0] = class_labels0[mask[:, 0] == 1][nearest[:, 0]]
###############################################################
                            # Missing-view completion: xi2 = mean(of the k view-2 points nearest to xi1)
# fill_num = k
# C = euclidean_dist(h0, h1)
# row_idx = C.argsort()
# col_idx = (C.t()).argsort()
# # Mij denotes the flag of i-th sample in view 0 and j-th sample in view 1
# M = torch.logical_and((mask[:, 0].repeat(test_num, 1)).t(), mask[:, 1].repeat(test_num, 1))
# for i in range(test_num):
# idx0 = col_idx[i, :][
# M[col_idx[i, :], i]] # idx for view 0 to sort and find the non-missing neighbors
# idx1 = row_idx[i, :][
# M[i, row_idx[i, :]]] # idx for view 1 to sort and find the non-missing neighbors
# if len(idx1) != 0 and len(idx0) == 0: # i-th sample in view 1 is missing
# avg_fill = h1[idx1[0:fill_num], :].sum(dim=0) / fill_num
# cnt += (class_labels1[idx1[0:fill_num]] == class_labels1[i]).sum()
# missing_cnt += 1
# recover_out0[i, :] = h0[i, :]
# recover_out1[i, :] = avg_fill # missing
# elif len(idx0) != 0 and len(idx1) == 0:
# avg_fill = h0[idx0[0:fill_num], :].sum(dim=0) / fill_num
# cnt += (class_labels0[idx0[0:fill_num]] == class_labels0[i]).sum()
# missing_cnt += 1
# recover_out0[i, :] = avg_fill # missing
# recover_out1[i, :] = h1[i, :]
# elif len(idx0) != 0 and len(idx1) != 0:
# recover_out0[i, :] = h0[i, :]
# recover_out1[i, :] = h1[i, :]
# else:
# raise Exception('error')
# if setting == 1:
# align_out0.extend((recover_out0.cpu()).numpy())
# align_out1.extend((recover_out1.cpu()).numpy())
# continue
#
else:
raise NotImplementedError('')
to_realign = torch.logical_and(is_pair == 0, torch.logical_and(mask[:, 1], mask[:, 0]))
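                        # to_realign marks samples where both views are observed (mask == 1) but
                        # is_pair == 0, i.e. the cross-view correspondence is treated as unaligned
                        # and view 1 is re-aligned to view 0 by the chosen reAlign strategy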
if args.reAlign == 'KnnMean':
                            # Correspondence re-alignment: keep xi1 fixed and replace xi2 with the mean of the k view-2 points nearest to xi1
if torch.sum(to_realign):
ha1 = h1[to_realign]
nearest = get_nearest_k(h0[to_realign], ha1, args.reAlignK)
# dist = torch.cdist(h0[to_realign].cpu(), ha1.cpu())
# nearest = torch.argsort(dist, dim=1)[:, :args.reAlignK]
h1[to_realign] = torch.cat([torch.mean(ha1[ns], dim=0) for ns in nearest])
# class_labels1[is_pair == 0] = class_labels1[is_pair == 0][nearest[:, 0]]
elif args.reAlign == 'Copy':
if torch.sum(to_realign):
h1[to_realign] = h0[to_realign]
# class_labels1[is_pair == 0] = class_labels0[is_pair == 0]
elif args.reAlign == 'KnnMapMean':
if torch.sum(to_realign):
targ_v1 = h1[is_pair]
nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK)
h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest])
# class_labels1[is_pair == 0] = ...
elif args.reAlign == 'Ignore':
pass
else:
raise NotImplementedError('')
if args.Rev:
fea0_rec, fea1_rec = self.decode([h1, h0])
else:
fea0_rec, fea1_rec = self.decode([h0, h1])
# if len(fea0_rec[0]) == len(fea1_rec[0]):
# fea_rec = torch.concat([fea0_rec, fea1_rec])
# fea = torch.concat([fea0, fea1])
# mask_c = torch.concat([mask[:, 0], mask[:, 1]])
# if torch.sum(mask_c == 0):
# rnmse_vec[0].extend(
# evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy())
# if torch.sum(mask_c == 1):
# rnmse_vec[1].extend(
# evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy())
# else:
# if torch.sum(mask == 0):
# n0_v0 = evaluate.get_rnmse(
# xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy()
# n0_v1 = evaluate.get_rnmse(
# xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy()
# rnmse_vec[0].extend(n0_v0)
# rnmse_vec[0].extend(n0_v1)
# if torch.sum(mask == 1):
# n1_v0 = evaluate.get_rnmse(
# xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy()
# n1_v1 = evaluate.get_rnmse(
# xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy()
# rnmse_vec[1].extend(n1_v0)
# rnmse_vec[1].extend(n1_v1)
g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int),
torch.ones(len(fea1), device=fea0.device, dtype=torch.int)))
h = torch.cat([h0, h1]).detach().cpu().numpy()
feature_vec.extend(h)
data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy())
group_vec.extend(g.cpu().numpy())
type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy())
inf_data_t = time.time()
feature_vec = np.array(feature_vec)
data_vec = np.array(data_vec)
feature_vec_cluster = np.array(feature_vec_cluster)
is_pair_all = np.array(is_pair_all)
feature_vec_classification = np.array(feature_vec_classification)
group_vec = np.array(group_vec)
group_vec_cluster = np.array(group_vec_cluster)
type_vec = np.array(type_vec)
type_vec_cluster = np.array(type_vec_cluster)
rnmse_vec[0] = np.array(rnmse_vec[0])
rnmse_vec[1] = np.array(rnmse_vec[1])
kmeans_time = TimeOperator.Timer()
if args.ShowReconstruct:
if args.dataset == 'MNISTUSPS':
dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets]
data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets]
Y = test_dataloader.dataset.datasets[0].targets
else:
dims = [d.shape[1] for d in test_dataloader.dataset.data]
data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data]
Y = test_dataloader.dataset.class_labels0
mask = test_dataloader.dataset.mask
n_per_cat = 10
rec0, rec1 = self.decode([
torch.from_numpy(feature_vec[group_vec == 0]).cuda(),
torch.from_numpy(feature_vec[group_vec == 1]).cuda()])
rec0 = rec0.detach().cpu().numpy()
rec1 = rec1.detach().cpu().numpy()
show_img = np.asarray([])
inds_map = np.asarray([])
for v in range(2):
col = np.asarray([])
inds_map_col = np.asarray([])
for y in range(10):
inds = np.arange(len(Y))[
np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y)
]
np.random.shuffle(inds)
assert len(inds) >= n_per_cat
inds = inds[:n_per_cat]
raw_imgs = data_list[v][inds]
missing_imgs = data_list[1 - v][inds]
rec_imgs = [rec0, rec1][v][inds]
rec_imgs_miss = [rec0, rec1][1 - v][inds]
pack = np.asarray(
[raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28])
if len(col):
col = np.concatenate([col, pack], axis=0)
else:
col = pack
if len(inds_map_col):
inds_map_col = np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0)
else:
inds_map_col = inds.reshape([1, -1])
if len(show_img):
show_img = np.concatenate([show_img, col], axis=1)
else:
show_img = col
if len(inds_map):
inds_map = np.concatenate([inds_map, inds_map_col], axis=1)
else:
inds_map = inds_map_col
plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg')
visualize_image(show_img, show=True, fig_path='/xlearning/pengxin/Temp/MissingRec.svg')
selected_ind = [
[8, 2, 8, 9, 7, 2, 5, 9, 9, 9],
[0, 2, 2, 3, 5, 7, 7, 9, 7, 0],
]
# ToMouxin
inds_to_mouxin = [
[im[si] for im, si in zip(inds_map[:, :n_per_cat], selected_ind[0])],
[im[si] for im, si in zip(inds_map[:, n_per_cat:], selected_ind[1])],
]
re_dt = np.load(
'/xlearning/pengxin/Checkpoints/MultiClustering/RunSets/230105/IMvC_RunSet0114_Ablation_FakeSampleWise/ --QuickConfig X50C50 --dataset MNISTUSPS --loss_sim_contras 0.02 --seed 1998/SampleCache/Np.npz')
np.savez('/xlearning/pengxin/Temp/MNISTUSPS_show.npz',
feature_vec=np.asarray([
re_dt['d0_data'][inds_to_mouxin[0]],
re_dt['d1_data'][inds_to_mouxin[1]]
]))
selected_ind_global = np.concatenate(
(np.asarray(selected_ind[0]).reshape([-1, 1]),
np.asarray(selected_ind[1]).reshape([-1, 1]) + n_per_cat),
axis=1
)
show_img_final = np.concatenate(
[show_img[4 * i:4 * i + 4, selected_ind_global[i]] for i in range(len(selected_ind_global))],
axis=1
)[:, [i * 2 for i in range(10)] + [i * 2 + 1 for i in range(10)]]
visualize_image(show_img_final, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecFinal.svg')
return
def cluster_and_measure(features, types, groups, row_pred=False):
kst = time.time()
centroids = torch.from_numpy(kmeans(features, self.class_num))
if args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']:
centroids = F.normalize(centroids, dim=1)
pred_vec = np.argmax(self.soft_ass(torch.from_numpy(features), centroids).numpy(), axis=1)
pred_adjusted, met = evaluate2(features, pred_vec, types, groups)
kmeans_time.update(time.time() - kst)
kmeans_time.show(prefix='kmeans_time')
if row_pred:
return pred_vec, pred_adjusted, met, centroids.cuda()
else:
return pred_adjusted, met, centroids.cuda()
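        # cluster_and_measure: faiss k-means on the given features, nearest-centroid assignment
        # via soft_ass, then clustering metrics computed by evaluate2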
if not (args.CodeTest and not args.EvalOriMean and not args.EvalOriScoreMean and not args.EvalOriPredMean):
print('EvalSigel-1')
pred_vec, pred_adjusted, met, centroids = cluster_and_measure(
features=feature_vec_cluster, types=type_vec_cluster, groups=group_vec_cluster, row_pred=True)
self.pred_cac = pred_vec
self.pred_center_cac = centroids
else:
met = {}
pred_adjusted = None
centroids = None
if args.ShowClustering:
# sub_sample = args.DrawMax
# if len(feature_vec) > sub_sample * 2:
# ind = np.arange(int(len(feature_vec) // 2))
# np.random.shuffle(ind)
# ind = ind[:sub_sample]
# ind = np.concatenate((ind, ind + int(len(feature_vec) // 2)))
# feature_vec = feature_vec[ind]
# group_vec = group_vec[ind]
# type_vec = type_vec[ind]
# visualize2(feature_vec=feature_vec, type_vec=type_vec, group_vec=group_vec,
# pred_vec=None,
# prefix=os.path.join('../', 'Visualization/E{:03d}N{:04d}'.format(epoch, len(type_vec))))
# visual_image_scatter(
# data_vec,
# feature_vec,
# group_vec,
# type_vec,
# )
raw_dataset = torchvision.datasets.ImageFolder(
'D:/VirtualMachine/Data/caltech-101/101_ObjectCategories',
transform=torchvision.transforms.Resize([256, 256])
)
# mat = sio.loadmat('D:/VirtualMachine/Data/Caltech101-all.mat')
# data = mat['X'][0][3:5]
# label = np.squeeze(mat['Y'])-1
raw_data_ind = np.ones(len(data_vec), dtype=int) * -1
class_num = len(np.unique(type_vec))
class_num_s = len(np.unique(raw_dataset.targets))
raw_dataset.targets = np.asarray(raw_dataset.targets) - 1
for t in range(class_num_s):
print('{: 4d} {: 4d} {: 4d} {: 4d}'.format(
t,
np.sum(t == type_vec),
np.sum(t == raw_dataset.targets),
np.sum(t == 0),
))
for t in np.unique(type_vec):
bank_inds = np.arange(len(raw_dataset.targets))[raw_dataset.targets == t]
raw_data_ind[t == type_vec] = np.concatenate([bank_inds, bank_inds])
# raw_data = raw_dataset[np.asarray(raw_data_ind, dtype=int)]
raw_data = np.asarray([np.asarray(raw_dataset[it][0]) for it in raw_data_ind])
np.savez(
os.path.join(args.resume.replace('.checkpoint', 'Raw2.npz')),
data_vec=raw_data,
feature_vec=feature_vec,
group_vec=group_vec,
type_vec=type_vec,
pred_adjusted=pred_adjusted,
)
return
if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0:
met_mul2 = {}
if args.EvalMulti:
print('EvalMulti')
multi_modality_feature = np.concatenate(
[feature_vec[group_vec == view] for view in np.unique(group_vec)],
axis=1)
_, met_mul, _ = cluster_and_measure(
features=multi_modality_feature, types=type_vec[group_vec == 0],
groups=group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['Multi-' + nv] = v
if args.EvalMean:
print('EvalMean')
_, met_mul, _ = cluster_and_measure(
features=np.mean(
np.asarray([feature_vec[group_vec == view] for view in np.unique(group_vec)]),
axis=0
), types=type_vec[group_vec == 0], groups=group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['Mean-' + nv] = v
if args.EvalSingel0:
print('EvalSingel0')
_, met_mul, _ = cluster_and_measure(features=feature_vec_cluster[group_vec_cluster == 0],
types=type_vec_cluster[group_vec_cluster == 0],
groups=group_vec_cluster[group_vec_cluster == 0])
for nv, v in met_mul.items():
met_mul2['Singel0-' + nv] = v
if args.EvalSingel1:
print('EvalSingel1')
_, met_mul, _ = cluster_and_measure(features=feature_vec_cluster[group_vec_cluster == 1],
types=type_vec_cluster[group_vec_cluster == 1],
groups=group_vec_cluster[group_vec_cluster == 1] - 1)
for nv, v in met_mul.items():
met_mul2['Singel1-' + nv] = v
if args.EvalOriMean:
print('EvalOriMean')
mean_fea = np.mean(
np.asarray([feature_vec[group_vec == view] for view in np.unique(group_vec)]),
axis=0
)
score = self.soft_ass(mean_fea, centroids.cpu().numpy())
pred_vec = np.argmax(score, axis=1)
_, met_mul = evaluate2(None, pred_vec, type_vec[group_vec == 0], group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['OriMean-' + nv] = v
if args.EvalOriScoreMean:
print('EvalOriScoreMean')
score = self.soft_ass(torch.from_numpy(feature_vec).cuda(), centroids).cpu().numpy()
pred_vec = np.argmax(np.mean(
np.asarray([score[group_vec == view] for view in np.unique(group_vec)]),
axis=0
), axis=1)
_, met_mul = evaluate2(None, pred_vec, type_vec[group_vec == 0], group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['OriScoreMean-' + nv] = v
if args.EvalOriPredMean:
print('EvalOriPredMean')
pred = torch.softmax(self.soft_ass(torch.from_numpy(feature_vec).cuda(), centroids) / 0.2,
dim=1).cpu().numpy()
pred_vec = np.argmax(np.mean(
np.asarray([pred[group_vec == view] for view in np.unique(group_vec)]),
axis=0
), axis=1)
_, met_mul = evaluate2(None, pred_vec, type_vec[group_vec == 0], group_vec[group_vec == 0])
for nv, v in met_mul.items():
met_mul2['EvalOriPredMean-' + nv] = v
if args.EvalCla:
mv_f = np.asarray([feature_vec[group_vec == view] for view in np.unique(group_vec)])
mv_gt = np.asarray([feature_vec_classification[group_vec == view] for view in np.unique(group_vec)])
for test_prop in [0.2, 0.5, 0.8]:
met_mul2['ClassificationACC{:.01f}'.format(test_prop)] = np.mean(
| [svm_classify(
| 11 | 2023-12-21 08:50:36+00:00 | 12k |
botcs/wolfson-scheduler | tests/test_solver.py | [
{
"identifier": "unravel_indices",
"path": "solver.py",
"snippet": "def unravel_indices(indices, shape):\n coord = []\n\n for dim in reversed(shape):\n coord.append(indices % dim)\n indices = indices // dim\n\n coord = torch.stack(coord[::-1], dim=-1)\n\n return coord"
},
{
"identifier": "generalized_outer_addition",
"path": "solver.py",
"snippet": "def generalized_outer_addition(vectors, output=None):\n \"\"\"\n Corrected function to compute the outer addition of N K-dimensional vectors using broadcasting.\n This function is equivalent to the following code:\n ```\n result = torch.zeros((K1, K2, ..., KN))\n for idx1 in range(K1):\n for idx2 in range(K2):\n ...\n result[idx1, idx2, ..., idxn] = vectors[idx1] + vectors[idx2] + ... + vectors[idxn]\n ```\n However, it is much faster because it uses pre-computed sums and sums of squares.\n\n :param vectors: List of N vectors of shape (K1, K2, ..., KN)\n :param output: Optional output tensor\n if provided, must be of shape (K1, K2, ..., KN)\n :return: Tensor of shape (K1, K2, ..., KN)\n \"\"\"\n\n # Assert all vectors are on the same device\n device = vectors[0].device\n assert all(\n v.device == device for v in vectors\n ), \"All vectors must be on the same device\"\n\n # Number of vectors (N) and dimensions (K)\n # N, K = vectors.shape\n N = len(vectors)\n Ks = [len(v) for v in vectors]\n if output is None:\n output = torch.zeros(Ks, dtype=vectors[0].dtype, device=vectors[0].device)\n else:\n assert output.shape == tuple(Ks), \"Output tensor has incorrect shape\"\n output.zero_()\n\n # Reshape each vector to have a unique non-singleton dimension\n for i in range(N):\n expanded_shape = [1] * N\n expanded_shape[i] = Ks[i]\n reshaped_vector = vectors[i].view(*expanded_shape)\n output += reshaped_vector\n\n return output"
},
{
"identifier": "compute_variances",
"path": "solver.py",
"snippet": "def compute_variances(X, Y):\n \"\"\"\n Compute variances between all combinations of vectors in X and Y.\n This function is equivalent to the following code:\n ```\n variances = torch.zeros((X.size(0), Y.size(0)))\n for i in range(X.size(0)):\n for j in range(Y.size(0)):\n concatenated = torch.cat((X[i], Y[j]))\n variances[i, j] = torch.var(concatenated, unbiased=False)\n ```\n However, it is much faster because it uses pre-computed sums and sums of squares.\n\n\n :param X: Tensor of shape (N, K)\n :param Y: Tensor of shape (M, L)\n \"\"\"\n\n # Compute sums and sums of squares for X\n sum_X = torch.sum(X, dim=1)\n sum_sq_X = torch.sum(X**2, dim=1)\n\n # Compute sums and sums of squares for Y\n sum_Y = torch.sum(Y, dim=1)\n sum_sq_Y = torch.sum(Y**2, dim=1)\n\n # Lengths of vectors in X and Y\n len_X = X.shape[1]\n len_Y = Y.shape[1]\n\n # Broadcasting sums and sum of squares for all combinations\n total_sum = sum_X.unsqueeze(1) + sum_Y.unsqueeze(0)\n total_sum_sq = sum_sq_X.unsqueeze(1) + sum_sq_Y.unsqueeze(0)\n total_len = len_X + len_Y\n\n # Compute variances\n mean = total_sum / total_len\n variances = total_sum_sq / total_len - mean**2\n\n return variances"
},
{
"identifier": "get_max_numel",
"path": "solver.py",
"snippet": "def get_max_numel(dtype, memory_capacity=None, device=\"cpu\"):\n \"\"\"\n Compute the maximum number of elements that fit in specified memory.\n\n :param dtype: Data type of the tensor (e.g., torch.float32)\n :param memory_capacity: Memory capacity in bytes\n :param device: 'cpu' or 'cuda'\n :return: maximum number of elements that fit\n \"\"\"\n\n # Get memory capacity\n if memory_capacity is None:\n memory_capacity = get_free_memory(device)\n\n # Calculate maximum number of elements that fit\n element_size = torch.tensor(\n [], dtype=dtype\n ).element_size() # Size in bytes of one element\n max_numel = memory_capacity // element_size\n\n return max_numel"
},
{
"identifier": "check_matrix_fit_and_num_chunks",
"path": "solver.py",
"snippet": "def check_matrix_fit_and_num_chunks(\n dimensions, dtype, memory_capacity=None, device=\"cpu\"\n):\n \"\"\"\n Check if a tensor of given dimensions and data type fits in specified memory.\n If not, return chunk sizes that maximize the capacity, slicing only along the first dimension.\n\n :param dimensions: Tuple of dimensions for the tensor\n :param dtype: Data type of the tensor (e.g., torch.float32)\n :param memory_capacity: Memory capacity in bytes\n :param device: 'cpu' or 'cuda'\n :return: number of chunks along the first dimension\n \"\"\"\n\n # Get memory capacity\n if memory_capacity is None:\n memory_capacity = get_memory_capacity(device)\n\n # Calculate total number of elements\n total_elements = 1\n for dim in dimensions:\n total_elements *= dim\n\n element_size = torch.tensor(\n [], dtype=dtype\n ).element_size() # Size in bytes of one element\n total_size = total_elements * element_size # Total memory required for the tensor\n\n if total_size <= memory_capacity:\n return 1\n\n # If doesn't fit, calculate chunk size for the first dimension\n other_dims_product = 1\n for dim in dimensions[1:]:\n other_dims_product *= dim\n\n max_first_dim_size = memory_capacity // (other_dims_product * element_size)\n if max_first_dim_size == 0:\n raise ValueError(\"Tensor does not fit in memory.\")\n\n num_chunks = math.ceil(dimensions[0] / max_first_dim_size)\n\n return num_chunks"
},
{
"identifier": "convert_property_to_categorical",
"path": "solver.py",
"snippet": "def convert_property_to_categorical(property):\n \"\"\"\n Convert the properties to a categorical variable.\n\n :param property: List of properties for each rower.\n Shape: (num_rowers)\n dtype: Any\n\n :return: Tensor of categorical properties.\n Shape: (num_rowers)\n dtype: torch.long\n \"\"\"\n\n unique_properties = set()\n for p in property:\n unique_properties.add(p)\n unique_properties = sorted(list(unique_properties))\n property = [unique_properties.index(p) for p in property]\n property = torch.tensor(property)\n return property"
},
{
"identifier": "extract_best_assignment",
"path": "solver.py",
"snippet": "def extract_best_assignment(assignments_per_week, total_score):\n \"\"\"\n Extract the best assignment for each outing.\n\n :param assignments_per_week: Tensor of assignments per week.\n shape: (num_outings, num_combinations, num_rowers)\n :param total_score: Tensor of total score for each assignment.\n shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n\n :return: Tensor of best assignment per outing.\n shape: (num_outings, 1, num_rowers)\n\n \"\"\"\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n # Find the top assignments\n # top_inds = torch.argsort(total_score.flatten(), descending=True)[0]\n top_idx = torch.argmax(total_score.flatten())\n\n top_idx = unravel_indices(top_idx, total_score.shape)\n\n # top_inds tells us for each outing the index of the top assignment\n top_assignment = torch.zeros(\n num_outings,\n 1,\n num_rowers,\n dtype=torch.uint8,\n device=assignments_per_week.device,\n )\n for outing_idx, comb_idx in enumerate(top_idx):\n top_assignment[outing_idx] = assignments_per_week[outing_idx, comb_idx]\n\n return top_assignment"
},
{
"identifier": "get_no_overlap_inds",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef get_no_overlap_inds(A, B):\n \"\"\"\n Perform matrix multiplication of A and B in chunks.\n Return the indices of rows in A and columns in B that have no overlap.\n Overlap is defined as a non-zero value in the product of A and B.\n\n :param A: First matrix\n shape: (num_combinations_A, num_rowers)\n :param B: Second matrix\n shape: (num_combinations_B, num_rowers)\n :param chunk_sizes: Chunk sizes for the first dimension of A\n :return: indices of rows in A and columns in B that have no overlap\n \"\"\"\n\n # check if the product of the two matrices fits in memory\n # if not, chunk the matrices and check for overlap in chunks\n num_chunks = check_matrix_fit_and_num_chunks(\n (A.shape[0], A.shape[1], B.shape[0]), dtype=A.dtype, device=A.device\n )\n\n # num_chunks = 1\n def multiply_and_find(A_chunk, B):\n # counts the number of double-assignments for each rower between the two boats\n assignment_count = torch.matmul(A_chunk, B.T)\n no_overlap_inds = torch.nonzero(assignment_count == 0)\n return no_overlap_inds\n\n # if the product fits in memory, check for overlap in one go\n if num_chunks == 1:\n return multiply_and_find(A, B)\n\n A_chunks = torch.chunk(A, num_chunks)\n\n # otherwise, chunk the matrices and check for overlap in chunks\n no_overlap_inds = []\n offset_idx = 0\n for A_chunk in tqdm.tqdm(A_chunks):\n # no_overlap_inds.append(multiply_and_find(A_chunk, B).tolist())\n chunk_inds = multiply_and_find(A_chunk, B)\n\n # add the chunk size to offset the indices\n chunk_inds[:, 0] += offset_idx\n offset_idx += A_chunk.shape[0]\n no_overlap_inds.append(chunk_inds)\n\n return torch.cat(no_overlap_inds)"
},
{
"identifier": "generate_binary_matrices",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef generate_binary_matrices(\n num_rowers,\n boat_sizes,\n device=\"cpu\",\n max_num_combinations=NUM_MAX_COMBINATION_PER_BOAT,\n):\n \"\"\"\n Generate binary matrices for each combination of rowers in boats.\n\n :param num_rowers: Total number of rowers\n :param boat_sizes: List of boat sizes\n \"\"\"\n per_boat_binary_matrices = []\n for boat_size in boat_sizes:\n # Precompute indices for combinations\n row_indices = []\n col_indices = []\n\n num_combinations = math.comb(num_rowers, boat_size)\n if num_combinations > max_num_combinations:\n M = torch.zeros((max_num_combinations, num_rowers), dtype=torch.bool)\n\n keep_indices = sample(\n torch.arange(num_combinations), k=max_num_combinations\n )\n keep_indices = keep_indices.sort().values\n i = 0\n for row, combination in enumerate(\n itertools.combinations(range(num_rowers), boat_size)\n ):\n if keep_indices[i] != row:\n continue\n for col in combination:\n row_indices.append(i)\n col_indices.append(col)\n i += 1\n if i == max_num_combinations:\n break\n\n else:\n M = torch.zeros((num_combinations, num_rowers), dtype=torch.bool)\n for row, combination in enumerate(\n itertools.combinations(range(num_rowers), boat_size)\n ):\n for col in combination:\n row_indices.append(row)\n col_indices.append(col)\n\n # Use advanced indexing to fill the matrix\n M[row_indices, col_indices] = 1\n per_boat_binary_matrices.append(M)\n return per_boat_binary_matrices"
},
{
"identifier": "eliminate_invalid_boats",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef eliminate_invalid_boats(\n binary_matrix, rower_sides, num_max_combinations=NUM_MAX_COMBINATION_PER_BOAT\n):\n \"\"\"\n Eliminate invalid boats from a binary matrix.\n\n Currently we consider a boat invalid if there are more rowers on one side than the other.\n We represent stroke as 1 and bow as -1 and 0 for no preference.\n\n :param binary_matrix: Binary matrix of rower combinations\n shape: (num_combinations, num_rowers)\n :return: Binary matrix with invalid boats eliminated\n \"\"\"\n\n # gather the rower sides for each rower in each boat for each combination\n num_assigned_rowers = binary_matrix[0].sum()\n # assert each row has the same number of assigned rowers\n assert (binary_matrix.sum(dim=1) == num_assigned_rowers).all()\n assert len(rower_sides) == binary_matrix.shape[1]\n idx = binary_matrix.nonzero()[:, 1].view(len(binary_matrix), num_assigned_rowers)\n outings = rower_sides[idx]\n\n # Compute the offset between the number of stroke and bow seats\n offset = torch.sum(outings, dim=1).abs()\n # Determine the number of rowers that are both stroke and bow seat\n count_where_both = torch.sum(outings == 0, dim=1)\n\n # Eliminate invalid boats\n is_valid = count_where_both >= offset\n binary_matrix = binary_matrix[is_valid]\n\n if len(binary_matrix) > num_max_combinations:\n binary_matrix = sample(binary_matrix, k=num_max_combinations)\n\n return binary_matrix"
},
{
"identifier": "generate_valid_assignments",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef generate_valid_assignments(\n single_boat_bin_matrices, num_max_combinations=NUM_MAX_PAIRWISE_COMBINATION\n):\n \"\"\"\n Generate valid combinations of rowers across multiple boats on a single outing\n\n :param matrices: List of binary matrices, each representing combinations for a boat.\n shape: List[\n Tensor(num_combinations_1, num_rowers),\n Tensor(num_combinations_2, num_rowers),\n ...\n Tensor(num_combinations_n, num_rowers),\n ]\n :return: Tensor of valid combinations across all boats.\n \"\"\"\n assert len(single_boat_bin_matrices) > 0, \"Must have at least one boat\"\n assert all(\n m.shape[1] == single_boat_bin_matrices[0].shape[1]\n for m in single_boat_bin_matrices\n ), \"All matrices must have the same number of rowers\"\n\n assignments = single_boat_bin_matrices[0]\n for boat_ind, boat_B in enumerate(single_boat_bin_matrices[1:], start=2):\n no_overlap_inds = get_no_overlap_inds(assignments, boat_B)\n\n if len(no_overlap_inds) > num_max_combinations:\n no_overlap_inds = sample(no_overlap_inds, k=num_max_combinations)\n\n A_inds, B_inds = no_overlap_inds.T\n\n # update boat_A to be the combination of boat_A and boat_B with no overlap\n assignments = assignments[A_inds] + boat_B[B_inds] * boat_ind\n return assignments"
},
{
"identifier": "evaluate_skill_variance",
"path": "solver.py",
"snippet": "def evaluate_skill_variance(assignments_per_week, skill_levels, dtype=torch.float16):\n \"\"\"\n This relies on the notion that the skill levels entered are not categorical\n but integer values (or can be mapped to ordered categories, e.g. M1 > M2 > M3 ... )\n\n :param assignments_per_week: Tensor of assignments per week.\n shape: (num_outings, num_combinations, num_rowers)\n\n :param skill_levels: Tensor of skill levels for each rower.\n shape: (num_rowers,)\n\n :return: Tensor of variance for each combination in each outing.\n shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n \"\"\"\n\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx][0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n max_num_boats = assignments_per_week.max().item()\n outing_variance = torch.zeros(\n num_outings, num_combinations, device=assignments_per_week.device, dtype=dtype\n )\n for boat_idx in range(max_num_boats):\n boat_assignment = assignments_per_week == boat_idx + 1\n # we use binary masking\n X = skill_levels * boat_assignment\n\n # but we need to make sure that we don't include the rowers that are not assigned\n X_sum = X.sum(dim=2)\n X_len = boat_assignment.sum(dim=2)\n X_mean = X_sum / X_len\n\n boat_variance = ((X - X_mean.unsqueeze_(2)) * boat_assignment) ** 2\n boat_variance = boat_variance.sum(dim=2)\n\n # we use the unbiased variance since the sample size is small\n boat_variance /= torch.clamp(X_len - 1, min=1)\n\n outing_variance += boat_variance\n\n # now we need to compute the variance between the outings across the week\n week_variance = generalized_outer_addition(outing_variance)\n return week_variance"
},
{
"identifier": "evaluate_num_preferred_outings",
"path": "solver.py",
"snippet": "def evaluate_num_preferred_outings(\n assignments_per_week, num_preferred_outings, dtype=torch.long\n):\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n assignments_per_week = assignments_per_week > 0\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n # just to pin memory and reuse the output tensor\n num_assignment_per_rower = torch.zeros(\n [num_combinations] * num_outings,\n device=assignments_per_week.device,\n dtype=dtype,\n )\n\n week_over_assignment = torch.zeros(\n [num_combinations] * num_outings,\n device=assignments_per_week.device,\n dtype=dtype,\n )\n\n for rower_idx in range(num_rowers):\n num_assignment_per_rower = generalized_outer_addition(\n assignments_per_week[:, :, rower_idx], output=num_assignment_per_rower\n )\n num_preferred_outings_per_rower = num_preferred_outings[rower_idx]\n assignment_diff = num_assignment_per_rower - num_preferred_outings_per_rower\n over_assignment = assignment_diff.clamp_(min=0)\n week_over_assignment += over_assignment\n\n return week_over_assignment"
},
{
"identifier": "evaluate_assignments_per_week",
"path": "solver.py",
"snippet": "def evaluate_assignments_per_week(\n assignments_per_week, properties, weights, return_stats=False\n):\n \"\"\"\n Evaluate the assignments per week.\n\n :param assignments_per_week: Tensor of num_outings different assignments for the week.\n Shape: (num_outings, num_combinations, num_rowers)\n dtype: torch.uint8\n :param properties: dict of Tensors of properties.\n Shape: {property_name: Tensor(num_rowers)}\n dtype: torch.long\n :param weights: dict of weights for each property.\n Shape: {property_name: float}\n :param return_stats: Whether to return the stats for each property.\n\n :return: Total score for the week.\n Shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n :return: Stats for each weight category.\n \"\"\"\n\n # Compute variance of skill levels\n week_variance = evaluate_skill_variance(\n assignments_per_week, properties[\"skill_level\"]\n )\n\n # Compute number of preferred outings\n week_num_preferred_outings = evaluate_num_preferred_outings(\n assignments_per_week, properties[\"num_preferred_outings\"]\n )\n\n # Compute total score\n total_score = (\n weights[\"skill variance\"] * week_variance\n + weights[\"over assignment\"] * week_num_preferred_outings\n )\n\n if return_stats:\n stats = {\n \"values\": {\n \"skill variance\": week_variance,\n \"over assignment\": week_num_preferred_outings,\n },\n \"weights\": weights,\n \"total\": total_score,\n }\n return total_score, stats\n\n return total_score"
},
{
"identifier": "permute_top_assignments",
"path": "solver.py",
"snippet": "def permute_top_assignments(\n valid_assignments,\n assignments_per_week,\n total_scores,\n num_permutations=10,\n randomize_permutations=True,\n):\n \"\"\"\n Permute the top assignments for the week.\n \"\"\"\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n assert len(valid_assignments) == num_outings, \"Must have the same number of outings\"\n assert (\n len(assignments_per_week) == num_outings\n ), \"Must have the same number of outings\"\n if any(m.ndim != 2 for m in valid_assignments):\n raise ValueError(\"All outing assignments have to be 2D for every outing\")\n if any(m.shape[1] != num_rowers for m in valid_assignments):\n raise ValueError(\n \"All outing assignments have to have the same number of rowers\"\n )\n if any((m.sum(dim=1) != m[0].sum()).any() for m in valid_assignments):\n raise ValueError(\n f\"In each combination of every outing,\\\n the number of rowers assigned must be the same.\"\n )\n\n # assert all(\n # m.ndim == 2\n # for m in valid_assignments\n # ), f\"All matrices must have the same number of dim: {[m.shape for m in valid_assignments]}\"\n # assert all(\n # m.shape[1] == num_rowers\n # for m in valid_assignments\n # ), \"All matrices must have the same number of rowers\"\n # for outing_idx in range(len(valid_assignments)):\n # assert (valid_assignments[outing_idx].sum() == valid_assignments[outing_idx][0].sum()).all(),\\\n # \"Combinations must have the same number of rowers assigned in an outing\"\n\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n best_assignment = extract_best_assignment(assignments_per_week, total_scores)\n\n # in the permutations we fix all outings except the outing we are permuting\n permuted_assignment = best_assignment.repeat(1, num_permutations + 1, 1)\n for outing_idx in range(len(assignments_per_week)):\n # just copy the best assignment num_permutations times\n if randomize_permutations:\n # we need to make sure that the best assignment is included\n permuted_assignment[outing_idx, 1:] = sample(\n valid_assignments[outing_idx], k=num_permutations\n )\n else:\n permuted_assignment[outing_idx, 1:] = valid_assignments[outing_idx][\n :num_permutations\n ]\n return permuted_assignment"
}
] | import torch
import unittest
import math
from unittest.mock import patch
from solver import (
unravel_indices,
generalized_outer_addition,
compute_variances,
get_max_numel,
check_matrix_fit_and_num_chunks,
convert_property_to_categorical,
extract_best_assignment,
get_no_overlap_inds,
generate_binary_matrices,
eliminate_invalid_boats,
generate_valid_assignments,
evaluate_skill_variance,
evaluate_num_preferred_outings,
evaluate_assignments_per_week,
permute_top_assignments,
) | 8,855 | assignments_per_week = torch.randint(0, 2, (3, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4, 4, 4) # Mock score tensor for 3 outings
# Expected output shape
expected_shape = (3, 1, 5)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertEqual(result.shape, expected_shape)
def test_edge_case_single_outing(self):
assignments_per_week = torch.randint(0, 2, (1, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4,)
expected_shape = (1, 1, 5)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertEqual(result.shape, expected_shape)
def test_output_type(self):
assignments_per_week = torch.randint(0, 2, (3, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4, 4, 4)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertIsInstance(result, torch.Tensor)
        self.assertEqual(result.dtype, torch.uint8)
def test_correctness_of_assignment_extraction(self):
# Mock data for 3 outings with 4 combinations each
assignments_per_week = torch.tensor([
[[0, 0], [0, 1], [1, 0], [1, 1]], # Outing 1
[[0, 0], [0, 1], [1, 0], [1, 1]], # Outing 2
[[0, 0], [0, 1], [1, 0], [1, 1]] # Outing 3
], dtype=torch.uint8)
# Mock total scores where the best scores are known
# Assuming the best scores are for the combinations [1, 0, 3] for outings [1, 2, 3]
total_score = torch.zeros((4, 4, 4))
total_score[1, 0, 3] = 1 # Highest score
# Expected best assignments for each outing
expected_assignments = torch.tensor([
[[0, 1]], # Outing 1
[[0, 0]], # Outing 2
[[1, 1]] # Outing 3
], dtype=torch.uint8) # Add dimension to match the expected output shape
result = extract_best_assignment(assignments_per_week, total_score)
self.assertTrue(torch.equal(result, expected_assignments))
class TestGetNoOverlapInds(unittest.TestCase):
def test_no_overlap(self):
A = torch.tensor([[1, 0], [0, 1]])
B = torch.tensor([[0, 1], [1, 0]])
expected_result = torch.tensor([[0, 0], [1, 1]])
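        # "No overlap" here means the two binary rows share no common 1-entry:
        # A[0]=[1,0] is disjoint from B[0]=[0,1], and A[1]=[0,1] from B[1]=[1,0].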
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_partial_overlap(self):
A = torch.tensor([[1, 1], [0, 1]])
B = torch.tensor([[1, 0], [0, 1]])
expected_result = torch.tensor([[1, 0]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_complete_overlap(self):
A = torch.tensor([[1, 1], [1, 1]])
B = torch.tensor([[1, 1], [1, 1]])
expected_result = torch.empty((0, 2), dtype=torch.int64)
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_different_sizes(self):
A = torch.tensor([[1, 1, 0, 0], [0, 1, 1, 0]])
B = torch.tensor([[1, 1, 0, 0], [0, 1, 1, 0], [1, 0, 0, 1]])
expected_result = torch.tensor([[1, 2]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
class TestGenerateBinaryMatrices(unittest.TestCase):
def test_correct_matrix_generation(self):
num_rowers = 4
boat_sizes = [2, 3]
expected_combinations = [math.comb(num_rowers, boat_size) for boat_size in boat_sizes]
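        # One matrix per boat size, enumerating every way to seat that many of the
        # 4 rowers: C(4, 2) = 6 and C(4, 3) = 4 candidate combinations.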
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for i, M in enumerate(result_matrices):
self.assertEqual(M.shape[0], expected_combinations[i]) # Correct number of combinations
self.assertEqual(M.shape[1], num_rowers) # Correct number of columns
self.assertTrue(torch.all((M.sum(axis=1) == boat_sizes[i]).logical_or(M.sum(axis=1) == 0))) # Correct boat sizes
def test_different_rower_and_boat_sizes(self):
num_rowers = 5
boat_sizes = [1, 4]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for M, boat_size in zip(result_matrices, boat_sizes):
self.assertEqual(M.shape, (math.comb(num_rowers, boat_size), num_rowers))
def test_output_type(self):
num_rowers = 3
boat_sizes = [2]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for M in result_matrices:
self.assertIsInstance(M, torch.Tensor)
            self.assertEqual(M.dtype, torch.bool)
class TestEliminateInvalidBoats(unittest.TestCase):
def test_no_elimination_of_valid_boats(self):
binary_matrix = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
rower_sides = torch.tensor([1, -1, 0]) # Stroke, Bow, No preference
        expected_result = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]])  # All boats are valid, so none should be eliminated
|
class TestUnravelIndices(unittest.TestCase):
def test_simple_case(self):
indices = torch.tensor([0, 1, 2, 3, 4, 5])
shape = (2, 3)
expected_result = torch.tensor([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
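        # Row-major (C-order) unraveling: e.g. flat index 5 with shape (2, 3)
        # maps to (5 // 3, 5 % 3) = (1, 2), matching the last expected row.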
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_single_dimension(self):
indices = torch.tensor([0, 1, 2, 3])
shape = (4,)
expected_result = torch.tensor([[0], [1], [2], [3]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_multi_dimension(self):
indices = torch.tensor([0, 1, 5, 11])
shape = (2, 3, 2)
expected_result = torch.tensor([[0, 0, 0], [0, 0, 1], [0, 2, 1], [1, 2, 1]])
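        # Worked example for flat index 11 with shape (2, 3, 2):
        # 11 // 6 = 1, (11 % 6) // 2 = 2, 11 % 2 = 1  ->  (1, 2, 1).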
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_edge_cases(self):
indices = torch.tensor([0])
shape = (1, 1, 1)
expected_result = torch.tensor([[0, 0, 0]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
def test_output_type_and_shape(self):
indices = torch.tensor([3, 7])
shape = (2, 4)
result = unravel_indices(indices, shape)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result.shape, (2, 2))
class TestGeneralizedOuterAddition(unittest.TestCase):
def test_correct_calculation(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4])]
expected_result = torch.tensor([[4, 5], [5, 6]])
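        # Outer addition: result[i, j] = vectors[0][i] + vectors[1][j],
        # e.g. [1, 2] and [3, 4] give [[1+3, 1+4], [2+3, 2+4]].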
result = generalized_outer_addition(vectors)
self.assertTrue(torch.equal(result, expected_result))
def test_different_vector_sizes(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4, 5])]
expected_result = torch.tensor([[4, 5, 6], [5, 6, 7]])
result = generalized_outer_addition(vectors)
self.assertTrue(torch.equal(result, expected_result))
def test_with_output_tensor(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4])]
output = torch.empty((2, 2))
expected_result = torch.tensor([[4, 5], [5, 6]])
result = generalized_outer_addition(vectors, output)
self.assertTrue(torch.equal(result, expected_result))
def test_error_with_incorrect_output_shape(self):
vectors = [torch.tensor([1, 2]), torch.tensor([3, 4])]
output = torch.empty((3, 3))
with self.assertRaises(AssertionError):
generalized_outer_addition(vectors, output)
def test_type_and_device_consistency(self):
vectors = [torch.tensor([1., 2.], device="cuda"), torch.tensor([3., 4.], device="cuda")]
result = generalized_outer_addition(vectors)
self.assertTrue(result.dtype == torch.float32)
self.assertTrue(result.device.type == "cuda")
class TestComputeVariances(unittest.TestCase):
def test_variances(self):
# Create sample data
torch.manual_seed(0) # For reproducibility
X = torch.rand(3, 7)
Y = torch.rand(4, 5)
# Expected variances computed by manual concatenation
expected_variances = torch.zeros((X.size(0), Y.size(0)))
for i in range(X.size(0)):
for j in range(Y.size(0)):
concatenated = torch.cat((X[i], Y[j]))
expected_variances[i, j] = torch.var(concatenated, unbiased=False)
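        # Reference values: population variance (unbiased=False) of each row of X
        # concatenated with each row of Y, computed by brute force above.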
# Variances computed by the function
actual_variances = compute_variances(X, Y)
# Assert equality (within a tolerance to account for floating-point errors)
self.assertTrue(torch.allclose(expected_variances, actual_variances, atol=1e-6))
class TestGetMaxNumel(unittest.TestCase):
@patch('solver.get_free_memory')
def test_with_different_dtypes(self, mock_get_free_memory):
mock_get_free_memory.return_value = 1024 # Mock 1024 bytes of free memory
dtypes = [torch.float32, torch.int32, torch.float64]
for dtype in dtypes:
element_size = torch.tensor([], dtype=dtype).element_size()
expected_result = 1024 // element_size
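            # e.g. float32 occupies 4 bytes, so 1024 bytes of free memory fit 256 elements.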
result = get_max_numel(dtype)
self.assertEqual(result, expected_result)
@patch('solver.get_free_memory')
def test_without_specified_memory_capacity(self, mock_get_free_memory):
mock_get_free_memory.return_value = 2048 # Mock 2048 bytes of free memory
dtype = torch.float32
element_size = torch.tensor([], dtype=dtype).element_size()
expected_result = 2048 // element_size
result = get_max_numel(dtype)
self.assertEqual(result, expected_result)
def test_with_specified_memory_capacity(self):
dtype = torch.float32
memory_capacity = 4096 # Specify 4096 bytes of memory
element_size = torch.tensor([], dtype=dtype).element_size()
expected_result = 4096 // element_size
result = get_max_numel(dtype, memory_capacity)
self.assertEqual(result, expected_result)
class TestCheckMatrixFitAndNumChunks(unittest.TestCase):
def test_tensor_fits_memory(self):
dimensions = (10, 10, 10)
dtype = torch.float32
memory_capacity = 40000 # Set a capacity that's more than enough
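        # 10*10*10 float32 elements need only 4,000 bytes, well under the
        # 40,000-byte budget, so a single chunk is expected.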
self.assertEqual(check_matrix_fit_and_num_chunks(dimensions, dtype, memory_capacity), 1)
def test_tensor_exceeds_memory(self):
dimensions = (100, 100, 100)
dtype = torch.float32
memory_capacity = 1000 # Set a capacity that's too small
self.assertRaises(ValueError, check_matrix_fit_and_num_chunks, dimensions, dtype, memory_capacity)
def test_different_data_types(self):
dimensions = (100, 100)
memory_capacity = 100000
for dtype in [torch.float32, torch.int32, torch.float64]:
self.assertIsInstance(check_matrix_fit_and_num_chunks(dimensions, dtype, memory_capacity), int)
def test_various_dimensions(self):
dtype = torch.float32
memory_capacity = 10000
test_dimensions = [
(100, 20, 5),
(50, 40, 30),
(200, 10, 10)
]
for dimensions in test_dimensions:
self.assertIsInstance(check_matrix_fit_and_num_chunks(dimensions, dtype, memory_capacity), int)
def test_without_specified_memory_capacity(self):
dimensions = (10, 10, 10)
dtype = torch.float32
self.assertIsInstance(check_matrix_fit_and_num_chunks(dimensions, dtype), int)
class TestConvertPropertyToCategorical(unittest.TestCase):
def test_correct_conversion(self):
property_list = ["red", "blue", "red"]
expected_result = torch.tensor([1, 0, 1])
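        # The expected indices assume categories are numbered in sorted order
        # ("blue" -> 0, "red" -> 1); the later test cases follow the same convention.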
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_empty_input(self):
property_list = []
expected_result = torch.tensor([])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_mixed_values(self):
property_list = ["apple", "banana", "apple", "cherry"]
expected_result = torch.tensor([0, 1, 0, 2])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_consistency_in_indexing(self):
property_list = ["dog", "cat", "bird", "cat"]
expected_result = torch.tensor([2, 1, 0, 1])
result = convert_property_to_categorical(property_list)
self.assertTrue(torch.equal(result, expected_result))
def test_output_type_and_shape(self):
property_list = ["one", "two", "three"]
result = convert_property_to_categorical(property_list)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result.dtype, torch.int64)
self.assertEqual(result.shape, (3,))
class TestExtractBestAssignment(unittest.TestCase):
def test_valid_inputs(self):
# Mock data
assignments_per_week = torch.randint(0, 2, (3, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4, 4, 4) # Mock score tensor for 3 outings
# Expected output shape
expected_shape = (3, 1, 5)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertEqual(result.shape, expected_shape)
def test_edge_case_single_outing(self):
assignments_per_week = torch.randint(0, 2, (1, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4,)
expected_shape = (1, 1, 5)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertEqual(result.shape, expected_shape)
def test_output_type(self):
assignments_per_week = torch.randint(0, 2, (3, 4, 5), dtype=torch.uint8)
total_score = torch.rand(4, 4, 4)
result = extract_best_assignment(assignments_per_week, total_score)
self.assertIsInstance(result, torch.Tensor)
        self.assertEqual(result.dtype, torch.uint8)
def test_correctness_of_assignment_extraction(self):
# Mock data for 3 outings with 4 combinations each
assignments_per_week = torch.tensor([
[[0, 0], [0, 1], [1, 0], [1, 1]], # Outing 1
[[0, 0], [0, 1], [1, 0], [1, 1]], # Outing 2
[[0, 0], [0, 1], [1, 0], [1, 1]] # Outing 3
], dtype=torch.uint8)
# Mock total scores where the best scores are known
# Assuming the best scores are for the combinations [1, 0, 3] for outings [1, 2, 3]
total_score = torch.zeros((4, 4, 4))
total_score[1, 0, 3] = 1 # Highest score
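        # The argmax of the flattened (4, 4, 4) scores unravels to (1, 0, 3), i.e.
        # combination 1 for outing 1, combination 0 for outing 2, combination 3 for outing 3.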
# Expected best assignments for each outing
expected_assignments = torch.tensor([
[[0, 1]], # Outing 1
[[0, 0]], # Outing 2
[[1, 1]] # Outing 3
], dtype=torch.uint8) # Add dimension to match the expected output shape
result = extract_best_assignment(assignments_per_week, total_score)
self.assertTrue(torch.equal(result, expected_assignments))
class TestGetNoOverlapInds(unittest.TestCase):
def test_no_overlap(self):
A = torch.tensor([[1, 0], [0, 1]])
B = torch.tensor([[0, 1], [1, 0]])
expected_result = torch.tensor([[0, 0], [1, 1]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_partial_overlap(self):
A = torch.tensor([[1, 1], [0, 1]])
B = torch.tensor([[1, 0], [0, 1]])
expected_result = torch.tensor([[1, 0]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_complete_overlap(self):
A = torch.tensor([[1, 1], [1, 1]])
B = torch.tensor([[1, 1], [1, 1]])
expected_result = torch.empty((0, 2), dtype=torch.int64)
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
def test_different_sizes(self):
A = torch.tensor([[1, 1, 0, 0], [0, 1, 1, 0]])
B = torch.tensor([[1, 1, 0, 0], [0, 1, 1, 0], [1, 0, 0, 1]])
expected_result = torch.tensor([[1, 2]])
result = get_no_overlap_inds(A, B)
self.assertTrue(torch.equal(result, expected_result))
class TestGenerateBinaryMatrices(unittest.TestCase):
def test_correct_matrix_generation(self):
num_rowers = 4
boat_sizes = [2, 3]
expected_combinations = [math.comb(num_rowers, boat_size) for boat_size in boat_sizes]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for i, M in enumerate(result_matrices):
self.assertEqual(M.shape[0], expected_combinations[i]) # Correct number of combinations
self.assertEqual(M.shape[1], num_rowers) # Correct number of columns
self.assertTrue(torch.all((M.sum(axis=1) == boat_sizes[i]).logical_or(M.sum(axis=1) == 0))) # Correct boat sizes
def test_different_rower_and_boat_sizes(self):
num_rowers = 5
boat_sizes = [1, 4]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for M, boat_size in zip(result_matrices, boat_sizes):
self.assertEqual(M.shape, (math.comb(num_rowers, boat_size), num_rowers))
def test_output_type(self):
num_rowers = 3
boat_sizes = [2]
result_matrices = generate_binary_matrices(num_rowers, boat_sizes)
for M in result_matrices:
self.assertIsInstance(M, torch.Tensor)
            self.assertEqual(M.dtype, torch.bool)
class TestEliminateInvalidBoats(unittest.TestCase):
def test_no_elimination_of_valid_boats(self):
binary_matrix = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
rower_sides = torch.tensor([1, -1, 0]) # Stroke, Bow, No preference
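        # Presumably a boat is invalid only when its rowers' side preferences cannot
        # all be satisfied; each pair here can seat one stroke-side rower and one
        # bow-side/flexible rower, so no combination should be dropped.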
        expected_result = torch.tensor([[1, 0, 1], [1, 1, 0], [0, 1, 1]])  # All boats are valid, so none should be eliminated | result = eliminate_invalid_boats(binary_matrix, rower_sides) | 9 | 2023-12-18 05:12:36+00:00 | 12k
Azure-Samples/functions-python-web-crawler | .venv/Lib/site-packages/charset_normalizer/cd.py | [
{
"identifier": "FREQUENCIES",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n \"ド\",\n \"シ\",\n \"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n 
\"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n \"ø\",\n \"c\",\n \"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n 
\"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n \"g\",\n \"h\",\n \"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n 
\"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}"
},
{
"identifier": "KO_NAMES",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}"
},
{
"identifier": "LANGUAGE_SUPPORTED_COUNT",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)"
},
{
"identifier": "TOO_SMALL_SEQUENCE",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "TOO_SMALL_SEQUENCE: int = 32"
},
{
"identifier": "ZH_NAMES",
"path": ".venv/Lib/site-packages/charset_normalizer/constant.py",
"snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}"
},
{
"identifier": "is_suspiciously_successive_range",
"path": ".venv/Lib/site-packages/charset_normalizer/md.py",
"snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) -> bool:\n \"\"\"\n Determine if two Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n return True"
},
{
"identifier": "CoherenceMatches",
"path": ".venv/Lib/site-packages/charset_normalizer/models.py",
"snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: CharsetMatch) -> None:\n def best(self) -> Optional[\"CharsetMatch\"]:\n def first(self) -> Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:"
},
{
"identifier": "is_accentuated",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n or \"WITH MACRON\" in description\n or \"WITH RING ABOVE\" in description\n )"
},
{
"identifier": "is_latin",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description"
},
{
"identifier": "is_multi_byte_encoding",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )"
},
{
"identifier": "is_unicode_range_secondary",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)"
},
{
"identifier": "unicode_range",
"path": ".venv/Lib/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None"
}
] | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
from .constant import (
FREQUENCIES,
KO_NAMES,
LANGUAGE_SUPPORTED_COUNT,
TOO_SMALL_SEQUENCE,
ZH_NAMES,
)
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
) | 10,281 |
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
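    # Probe every single byte from 0x40 to 0xFE and record the Unicode range
    # of the character it decodes to (secondary/ambiguous ranges are ignored).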
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function resolves that correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function resolves that correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it uses accented characters and whether it is purely Latin-based.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
|
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function resolves that correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function resolves that correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it uses accented characters and whether it is purely Latin-based.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
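    # Scan the language's frequency table: an accented letter presumably marks the
    # language as accented, and any non-Latin character clears the pure-Latin flag.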
for character in FREQUENCIES[language]: | if not target_have_accents and is_accentuated(character): | 7 | 2023-12-16 04:12:01+00:00 | 12k |
liebrandapps/FindMyGUI | main.py | [
{
"identifier": "AirTag",
"path": "airTag.py",
"snippet": "class AirTag:\n\n def __init__(self, ctx, jsonFile=None):\n self.log = ctx.log\n self.cfg = ctx.cfg\n self.__id = uuid.uuid4().hex\n self._name = \"\"\n self._privateKey = None\n self._advertisementKey = None\n self._hashedKey = None\n self._needsSave = False\n self._lastSeen = None\n self._latitude = None\n self._longitude = None\n self._history = {}\n self._imgId = \"airtag\"\n if jsonFile is None:\n airTagDir = ctx.cfg.general_airTagDirectory\n airTagSuffix = ctx.cfg.general_airTagSuffix\n self.fileName = join(airTagDir, self.__id + airTagSuffix)\n self._needsSave = True\n else:\n self.fileName = jsonFile\n self.load(jsonFile)\n\n @property\n def id(self):\n return self.__id\n\n def load(self, jsonFile):\n with open(jsonFile) as f:\n dta = json.load(f)\n self._name = dta['name']\n self._privateKey = base64.b64decode(dta['privateKey'])\n self._advertisementKey = base64.b64decode(dta['advertisementKey'])\n s256 = hashlib.sha256()\n s256.update(self._advertisementKey)\n self._hashedKey = base64.b64encode(s256.digest()).decode(\"ascii\")\n if 'id' in dta.keys():\n self.__id = dta['id']\n else:\n self.save()\n if 'lastSeen' in dta.keys():\n self._lastSeen = dta['lastSeen']\n self._longitude = dta['longitude']\n self._latitude = dta['latitude']\n if 'history' in dta.keys():\n self._history = dta['history']\n if 'imgId' in dta.keys():\n self._imgId = dta['imgId']\n self.log.info(f\"Loaded AirTag [{self._name} / {self.__id}] from file {self.fileName}\")\n self._needsSave = False\n\n def save(self):\n toRemove = []\n cutOff = datetime.now() - timedelta(days=self.cfg.general_history)\n for h in self._history.keys():\n if int(h) < cutOff.timestamp():\n toRemove.append(h)\n for r in toRemove:\n del self._history[r]\n j = self.toJSON()\n with open(self.fileName, 'w') as f:\n print(j, file=f)\n self.log.info(f\"Saved AirTag [{self._name} / {self.__id}] to file {self.fileName}\")\n self._needsSave = False\n\n @property\n def needsSave(self):\n return self._needsSave\n\n def toJSON(self):\n return json.dumps(self.toDict(), indent=4)\n\n def toDict(self):\n return {'name': self._name,\n 'privateKey': base64.b64encode(self._privateKey).decode('ascii'),\n 'advertisementKey': base64.b64encode(self._advertisementKey).decode('ascii'),\n 'lastSeen': self._lastSeen,\n 'longitude': self._longitude,\n 'latitude': self._latitude,\n 'history': self._history,\n 'imgId': self._imgId,\n 'id': self.id}\n\n def resolveTag(self, tag):\n value = \"notFound\"\n if tag == '##NAME##':\n value = self._name\n if tag == '##ID##':\n value = self.id\n if tag == '##LASTSEEN##':\n if self._lastSeen is None or int(self._lastSeen) == 0:\n value = \"Never\"\n else:\n value = datetime.utcfromtimestamp(self._lastSeen).strftime('%H:%M:%S %d.%m.%Y')\n return value\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._needsSave = self._needsSave or (value != self._name)\n self._name = value\n\n @property\n def privateKey(self):\n return base64.b64encode(self._privateKey).decode('ascii')\n\n @privateKey.setter\n def privateKey(self, value):\n v = base64.b64decode(value)\n self._needsSave = self._needsSave or (v != self._privateKey)\n self._privateKey = v\n\n @property\n def advertisementKey(self):\n return base64.b64encode(self._advertisementKey).decode('ascii')\n\n @advertisementKey.setter\n def advertisementKey(self, value):\n v = base64.b64decode(value)\n self._needsSave = self._needsSave or (v != self._advertisementKey)\n self._advertisementKey = v\n\n @property\n def 
hashedAdvKey(self):\n return self._hashedKey\n\n @property\n def lastSeen(self):\n return self._lastSeen\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n def updateLocation(self, when, latitude, longitude):\n if self._lastSeen is None or when > self._lastSeen:\n self._longitude = longitude\n self._latitude = latitude\n self._lastSeen = when\n self._history[when] = {'lat': latitude, 'lon': longitude}\n self._needsSave = True\n\n @property\n def history(self):\n return self._history\n\n @property\n def imgId(self):\n return self._imgId\n\n @imgId.setter\n def imgId(self, value):\n self._needsSave = self._needsSave or value != self.imgId\n self._imgId = value"
},
{
"identifier": "API",
"path": "api.py",
"snippet": "class API:\n\n def __init__(self, ctx):\n self.ctx = ctx\n self.log = ctx.log\n\n def call(self, cmd, params=None):\n self.log.debug(f\"[API] Handling API command <{cmd}>\")\n result = {}\n if cmd == \"listTags\":\n result = self._listTags()\n if cmd == 'getPos':\n result = self._getPos()\n if cmd == 'refresh':\n result = self._refresh()\n if cmd == 'getTagData':\n result = self._getTagData(params['id'][0])\n if cmd == 'editTag':\n result = self._editTag(params['id'][0], params['name'][0], params['privateKey'][0],\n params['advertisementKey'][0], params['imgId'][0])\n if cmd == 'addTag':\n result = self._addTag(params['id'][0], params['name'][0], params['privateKey'][0],\n params['advertisementKey'][0], params['imgId'][0])\n if cmd == 'signInStatus':\n result = self._signInStatus(int(params['timeStamp'][0]))\n if cmd == 'creds':\n result = self._creds(params['userName'][0], params['password'][0])\n if cmd == 'auth':\n result = self._auth(params['ndFactor'][0])\n if cmd == 'lastLocationUpdate':\n result = self._lastLocationUpdate()\n return json.dumps(result if result is not None else {})\n\n def _listTags(self):\n dct = {}\n for id in self.ctx.airtags.keys():\n dct[id] = self.ctx.airtags[id].toDict()\n return dct\n\n def _getPos(self):\n findMy = FindMy(self.ctx)\n data = findMy.retrieveLocations()\n return data\n\n def _refresh(self):\n self.ctx.signInDone = False\n findMy = FindMy(self.ctx)\n try:\n data = findMy.retrieveLocations()\n except requests.exceptions.ConnectTimeout as e:\n msg = f\"[API] Anisette Server not running: {str(e)}\"\n self.ctx.errMsg = msg\n self.ctx.log.error(msg)\n data = {\"status\": \"fail\", \"msg\": msg}\n return data\n\n def _getTagData(self, id):\n self.log.debug(f\"[API] Cmds' getTagData parameter is id={id}\")\n if id in self.ctx.airtags.keys():\n tag = self.ctx.airtags[id]\n dct = tag.toDict()\n dct['status'] = 'ok'\n else:\n dct = {'status': 'fail', 'msg': 'tag not found', 'id': id}\n return dct\n\n def _editTag(self, id, name, privKey, advKey, imgId):\n self.log.debug(f\"[API] Cmds' editTag parameter are id={id}, name={name}, private Key={privKey}, \"\n f\"advertisementKey={advKey}\")\n if id in self.ctx.airtags.keys():\n tag = self.ctx.airtags[id]\n tag.name = name\n tag.privateKey = privKey\n tag.advertisementKey = advKey\n tag.imgId = imgId\n if tag.needsSave:\n tag.save()\n dct = {'status': 'ok', 'dataChanged': str(tag.needsSave)}\n else:\n dct = {'status': 'fail', 'msg': 'tag not found', 'id': id}\n return dct\n\n def _addTag(self, id, name, privKey, advKey, imgId):\n self.log.debug(f\"[API] Cmds' addTag parameter are id={id}, name={name}, private Key={privKey}, \"\n f\"advertisementKey={advKey}\")\n tag = AirTag(self.ctx)\n tag.name = name\n tag.privateKey = privKey\n tag.advertisementKey = advKey\n tag.imgId = imgId\n tag.save()\n self.ctx.airtags[tag.id] = tag\n return {'status': 'ok', 'id': tag.id}\n\n def _signInStatus(self, timeStamp):\n self.log.debug(f\"[API] Cmds' signInStatus parameter is timeStamp={timeStamp}\")\n dct = {'status': 'wait', 'timeStamp': timeStamp}\n idx = 3\n while idx > 0:\n if self.ctx.signInDone:\n dct['status'] = \"done\"\n self.ctx.signInDone = False\n break\n elif len(self.ctx.errMsg) > 0:\n dct['status'] = \"fail\"\n dct['msg'] = self.ctx.errMsg\n self.ctx.errMsg = \"\"\n break\n elif self.ctx.requestCreds > timeStamp:\n dct['status'] = \"creds\"\n dct['timeStamp'] = self.ctx.requestCreds\n break\n elif self.ctx.requestAuth > timeStamp:\n dct['status'] = \"auth\"\n dct['timeStamp'] = 
self.ctx.requestAuth\n break\n idx -= 1\n time.sleep(1.0)\n return dct\n\n def _creds(self, userName, password):\n self.log.debug(f\"[API] Cmds' creds parameter are userName={userName}, password=(is set: {len(password) > 0})\")\n self.ctx.userName = userName\n self.ctx.password = password\n return {'status': 'ok'}\n\n def _auth(self, ndFactor):\n self.log.debug(f\"[API] Cmds' auth parameter are ndFactor={ndFactor}\")\n self.ctx.ndFactor = str(ndFactor)\n return {'status': 'ok'}\n\n def _lastLocationUpdate(self):\n return {'lastLocationUpdate': self.ctx.lastLocationUpdate}"
},
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n\n def __init__(self, cfgFile):\n self.cfg = RawConfigParser()\n _ = self.cfg.read(cfgFile)\n self.scope = {}\n self.lastget = None\n self.section = None\n\n def addScope(self, dictionary):\n for key in dictionary.keys():\n if key in self.scope.keys():\n self.scope[key].update(dictionary[key])\n else:\n self.scope[key] = dictionary[key]\n\n @staticmethod\n def hasKey(dct, key):\n k = key.upper()\n for d in dct:\n if d.upper() == k:\n return d\n return None\n\n def hasSection(self, section):\n return self.cfg.has_section(section)\n\n def hasOption(self, option):\n return self.cfg.has_option(self.section, option)\n\n #\n # name is one of the following:\n # - a single key(option), then section needs to be set before\n # - section_option\n def __getattr__(self, name):\n if self.lastget is None:\n # ok - now try section_option\n idx = name.split('_')\n if len(idx) > 1:\n # if we have more than one '_' in the string, section_option may be ambiguous\n tmpSection = idx[0]\n if tmpSection not in self.scope and len(idx) > 2:\n tmpSection = idx[0] + \"_\" + idx[1]\n idx[1] = \"_\".join(idx[2:])\n else:\n idx[1] = \"_\".join(idx[1:])\n if tmpSection in self.scope:\n option = idx[1]\n subScope = self.scope[tmpSection]\n if option in subScope:\n theTuple = subScope[option]\n if len(theTuple) > 1:\n defaultValue = [] if theTuple[0].upper().startswith('A') else theTuple[1]\n else:\n defaultValue = [] if theTuple[0].upper().startswith('A') else None\n if not(self.cfg.has_option(tmpSection, option)):\n return defaultValue\n if theTuple[0].startswith('S'):\n return self.cfg.get(tmpSection, option)\n if theTuple[0].startswith('I'):\n return self.cfg.getint(tmpSection, option)\n if theTuple[0].startswith('B'):\n return self.cfg.getboolean(tmpSection, option)\n if theTuple[0].startswith(\"F\"):\n return self.cfg.getfloat(tmpSection, option)\n if theTuple[0].upper().startswith('A'):\n return [] if self.cfg.get(tmpSection, option) is None \\\n else self.cfg.get(tmpSection, option).split(':')\n # target design: try section.option\n if self.lastget is None:\n if name in self.scope:\n self.lastget = name\n return self\n else:\n section = self.lastget\n self.lastget = None\n theTuple = self.scope[section][name]\n if not(self.cfg.has_section(section)):\n self.cfg.add_section(section)\n if not (self.cfg.has_option(section, name)) and len(theTuple) > 1:\n self.cfg.set(section, name, theTuple[1])\n if theTuple[0].upper().startswith('S'):\n return self.cfg.get(section, name)\n if theTuple[0].upper().startswith('I'):\n return self.cfg.getint(section, name)\n if theTuple[0].upper().startswith('B'):\n return self.cfg.getboolean(section, name)\n if theTuple[0].upper().startswith('A'):\n return [] if self.cfg.get(section, name) is None else self.cfg.get(section, name).split(':')\n return None\n\n def setSection(self, newSection):\n tmp = self.section\n self.section = newSection\n return tmp\n\n def readValue(self, key):\n return self.cfg.get(self.section, key)"
},
{
"identifier": "Context",
"path": "context.py",
"snippet": "class Context:\n statusFile = \"./findMyGUI.json\"\n\n def __init__(self, cfg, log):\n self.__log = log\n self.__cfg = cfg\n self.__threadMonitor = {}\n self.startTime = datetime.now()\n self.__airtags = {}\n self._signInDone = False\n self._requestCreds = 0\n self._requestAuth = 0\n self._userName = \"\"\n self._password = \"\"\n self._ndFactor = \"\"\n self._errMsg = \"\"\n self._lastLocationUpdate = 0\n\n def load(self):\n if exists(Context.statusFile):\n with open(Context.statusFile) as f:\n dta = json.load(f)\n self._lastLocationUpdate = dta['lastLocationUpdate']\n\n def save(self):\n j = {\"lastLocationUpdate\": self._lastLocationUpdate}\n with open(Context.statusFile, 'w') as f:\n print(json.dumps(j, indent=4), file=f)\n\n @property\n def log(self):\n return self.__log\n\n @property\n def cfg(self):\n return self.__cfg\n\n @property\n def airtags(self):\n return self.__airtags\n\n @property\n def threadMonitor(self):\n return self.__threadMonitor\n\n def checkThreads(self, now):\n missing = []\n for k in self.__threadMonitor.keys():\n if (now - self.__threadMonitor[k][0]).seconds > 900:\n # thread has not updated since 15 minutes\n self.__log.warn(\"[CTX] Thread for class %s has not sent an alive message for %d seconds\" %\n (k, (now - self.__threadMonitor[k][0]).seconds))\n missing.append(self.__threadMonitor[k])\n return missing\n\n def uptime(self, now):\n days = (now - self.startTime).days\n secs = (now - self.startTime).seconds\n hours = int((secs % 86400) / 3600)\n minutes = int((secs % 3600) / 60)\n seconds = int(secs % 60)\n\n up = \"\"\n if days > 0:\n up += str(days) + \" \" + (days == 1 and \"day\" or \"days\") + \", \"\n if len(up) > 0 or hours > 0:\n up += str(hours) + \" \" + (hours == 1 and \"hour\" or \"hours\") + \", \"\n if len(up) > 0 or minutes > 0:\n up += str(minutes) + \" \" + (minutes == 1 and \"minute\" or \"minutes\") + \", \"\n up += str(seconds) + \" \" + (seconds == 1 and \"second\" or \"seconds\")\n return up\n\n @property\n def requestCreds(self):\n return self._requestCreds\n\n @requestCreds.setter\n def requestCreds(self, value):\n self._requestCreds = value\n\n @property\n def requestAuth(self):\n return self._requestAuth\n\n @requestAuth.setter\n def requestAuth(self, value):\n self._requestAuth = value\n\n @property\n def signInDone(self):\n return self._signInDone\n\n @signInDone.setter\n def signInDone(self, value):\n self._signInDone = value\n\n @property\n def userName(self):\n return self._userName\n\n @userName.setter\n def userName(self, value):\n self._userName = value\n\n @property\n def password(self):\n return self._password\n\n @password.setter\n def password(self, value):\n self._password = value\n\n @property\n def ndFactor(self):\n return self._ndFactor\n\n @ndFactor.setter\n def ndFactor(self, value):\n self._ndFactor = value\n\n @property\n def errMsg(self):\n return self._errMsg\n\n @errMsg.setter\n def errMsg(self, value):\n self._errMsg = value\n\n @property\n def lastLocationUpdate(self):\n return self._lastLocationUpdate\n\n @lastLocationUpdate.setter\n def lastLocationUpdate(self, value):\n self._lastLocationUpdate = value\n self.save()"
},
{
"identifier": "Daemon",
"path": "daemon.py",
"snippet": "class Daemon:\n\n def __init__(self, pidFile, app, logFile):\n self.pidFile = pidFile\n self.logFile = logFile\n self.app = app\n\n @staticmethod\n def getTimeStamp():\n return time.strftime('%d.%m.%Y %H:%M:%S', time.localtime(time.time()))\n\n @staticmethod\n def printLogLine(file, message):\n file.write('%s %s\\n' % (Daemon.getTimeStamp(), message))\n file.flush()\n\n def startstop(self, todo, stdout=\"/dev/null\", stderr=None, stdin=\"/dev/null\"):\n try:\n pf = open(self.pidFile, 'r')\n pid = int(pf.read().strip())\n pf.close()\n except IOError:\n pid = None\n\n if 'stop' == todo or 'restart' == todo:\n if not pid:\n msg = \"[%s] Could not stop. Pidfile %s is missing\\n\" % (self.app, self.pidFile)\n Daemon.printLogLine(sys.stderr, msg)\n sys.exit(1)\n Daemon.printLogLine(sys.stdout, \"[%s] Stopping Process with PID %d\" % (self.app, pid))\n try:\n cnt = 10\n while 1:\n if cnt < 0:\n os.kill(pid, signal.SIGKILL)\n else:\n os.kill(pid, signal.SIGTERM)\n time.sleep(3)\n cnt -= 1\n except OSError as err:\n err = str(err)\n if err.find(\"No such process\") > 0:\n if \"stop\" == todo:\n if os.path.exists(self.pidFile):\n os.remove(self.pidFile)\n sys.exit(0)\n todo = \"start\"\n pid = None\n else:\n print(str(err))\n sys.exit(1)\n if 'start' == todo:\n if pid:\n msg = \"[%s] Start aborted since Pidfile %s exists\" % self.app\n Daemon.printLogLine(sys.stderr, msg % self.pidFile)\n sys.exit(1)\n Daemon.printLogLine(sys.stdout, \"[%s] Starting Process as Daemon\" % self.app)\n self.daemonize(stdout, stderr, stdin)\n if 'status' == todo:\n if pid:\n logFileStatus = os.path.exists(self.logFile)\n if logFileStatus:\n (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(self.logFile)\n logLastModified = time.ctime(mtime)\n else:\n logLastModified = \"never\"\n if psutil.pid_exists(pid):\n process = psutil.Process(pid)\n with process.oneshot():\n msg = \"[%s] Process with pid %d is running [%s], last log update [%s]\" \\\n % (self.app, pid, process.name(), logLastModified)\n self.printLogLine(sys.stdout, msg)\n sys.exit(0)\n else:\n msg = \"[%s] Process with pid %d is NOT running, but we have a PID file - maybe it crashed. Last \" \\\n \"log update [%s]\" % (self.app, pid, logLastModified)\n self.printLogLine(sys.stdout, msg)\n if os.path.exists(self.pidFile):\n os.remove(self.pidFile)\n sys.exit(3)\n else:\n msg = \"[%s] Process seems to be not running - no PIDFile (%s) found.\" % (self.app, self.pidFile)\n self.printLogLine(sys.stderr, msg)\n sys.exit(0)\n\n def daemonize(self, stdout='/dev/null', stderr=None, stdin='/dev/null'):\n if not stderr:\n stderr = stdout\n si = open(stdin, 'r')\n so = open(stdout, 'a+')\n se = open(stderr, 'a+')\n\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"[%s] fork #1 failed (%d) %s\" % (self.app, e.errno, e.strerror))\n sys.exit(1)\n\n os.umask(0)\n os.setsid()\n\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"[%s] fork #2 failed (%d) %s\" % (self.app, e.errno, e.strerror))\n sys.exit(1)\n pid = str(os.getpid())\n self.printLogLine(sys.stdout, \"[%s] Process started as Daemon with pid %s\" % (self.app, pid))\n if self.pidFile:\n open(self.pidFile, 'w+').write('%s\\n' % pid)"
}
] | import glob
import logging
import signal
import sys
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from logging.handlers import RotatingFileHandler
from os import makedirs
from os.path import join, exists, splitext
from threading import Thread
from urllib.parse import parse_qs, urlparse
from airTag import AirTag
from api import API
from config import Config
from context import Context
from daemon import Daemon | 7,275 | global runAsDaemon
try:
_log = logging.Logger(APP)
loghdl = RotatingFileHandler(cfg.logging_logFile, 'a', cfg.logging_maxFilesize, 4)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
if cfg.logging_stdout and not runAsDaemon:
loghdl = logging.StreamHandler(sys.stdout)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
_log.disabled = False
return _log
except Exception as e:
print("[%s] Unable to initialize logging. Reason: %s" % (APP, e))
return None
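# Signal handler: set the termination flag once and stop the HTTP server; shutdown() is dispatched on a separate thread.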
def terminate(sigNo, _):
global doTerminate
global myServer
global httpIsRunning
if doTerminate:
return
doTerminate = True
ctx.log.info(f"[{APP}] Terminating with Signal {sigNo} {sigs[sigNo]}")
if httpIsRunning:
Thread(target=myServer.shutdown).start()
def loadAirTags():
global ctx
airTagDir = ctx.cfg.general_airTagDirectory
airTagSuffix = ctx.cfg.general_airTagSuffix
if not exists(airTagDir):
ctx.log.info(
f"[loadAirTags] Airtags Directory '{airTagDir}' does not exist, creating it. This will be used to store Airtag key information.")
makedirs(airTagDir)
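    # Load every stored AirTag definition (one JSON file per tag) into the shared context, keyed by tag id.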
tags = glob.glob(join(airTagDir, '*' + airTagSuffix))
for t in tags:
airtag = AirTag(ctx, jsonFile=t)
ctx.airtags[airtag.id] = airtag
class FindMyServer(BaseHTTPRequestHandler):
    '''Maps a file extension to its (Content-Type, encode-as-UTF-8 flag) pair.'''
contentTypeDct = {'.html': ["text/html", True],
'.js': ["application/javascript", True],
'.css': ["text/css", True],
'.png': ["image/png", False],
}
def do_GET(self):
if self.path.startswith('/api'):
api = API(ctx)
query_components = parse_qs(urlparse(self.path).query)
cmd = query_components["command"]
result = api.call(cmd[0], params=query_components)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(result.encode('UTF-8'))
else:
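            # Static file serving: map the request path onto the www directory and choose the Content-Type from the extension table above.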
file = "/index.html" if self.path == "/" else self.path
file = join('www', file[1:])
ext = splitext(file)[1]
ct = self.contentTypeDct[ext] if ext in self.contentTypeDct.keys() else None
if exists(file) and ct is not None:
contentType = ct[0]
encode = ct[1]
self.send_response(200)
self.send_header("Content-type", contentType)
self.end_headers()
with open(file, 'r' if encode else 'rb') as f:
data = f.read()
self.wfile.write(data.encode('UTF-8') if encode else data)
else:
self.send_response(404)
self.end_headers()
if __name__ == '__main__':
doTerminate = False
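    # Default configuration: each option maps to a (type, default) pair, grouped by section, and is merged into Config via addScope().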
initialConfig = {
"general": {
"httpHost": ['String', '0.0.0.0'],
"httpPort": ['Integer', 8008],
"httpFiles": ['String', 'www'],
"anisetteHost": ['String', 'http://192.168.2.15'],
"anisettePort": ['Integer', 6969],
"airTagDirectory": ['String', 'airtags'],
"airTagSuffix": ['String', '.json'],
"history": ["Integer", 30],
},
"logging": {
"logFile": ["String", "/tmp/findMyGUI.log"],
"maxFilesize": ["Integer", 1000000],
"msgFormat": ["String", "%(asctime)s, %(levelname)s, %(module)s {%(process)d}, %(lineno)d, %(message)s"],
"logLevel": ["Integer", 10],
"stdout": ["Boolean", True],
},
"appleId": {
"appleId": ["String", ''],
"password": ["String", ''],
"trustedDevice": ["Boolean", False],
}
}
path = join(CONFIG_DIR, CONFIG_FILE)
if not (exists(path)):
print(f"[{APP}] No config file {CONFIG_FILE} found at {CONFIG_DIR}, using defaults")
cfg = Config(path)
cfg.addScope(initialConfig)
runAsDaemon = False
if len(sys.argv) > 1:
todo = sys.argv[1]
if todo in ['start', 'stop', 'restart', 'status']:
runAsDaemon = True
pidFile = cfg.general_pidFile
logFile = cfg.logging_logFile
| """
Mark Liebrand 2024
This file is part of FindMyGUI which is released under the Apache 2.0 License
See file LICENSE or go to for full license details https://github.com/liebrandapps/FindMyGUI
"""
APP = "findMyGUI"
CONFIG_DIR = "./"
CONFIG_FILE = "findMyGUI.ini"
def setupLogger():
global runAsDaemon
try:
_log = logging.Logger(APP)
loghdl = RotatingFileHandler(cfg.logging_logFile, 'a', cfg.logging_maxFilesize, 4)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
if cfg.logging_stdout and not runAsDaemon:
loghdl = logging.StreamHandler(sys.stdout)
loghdl.setFormatter(logging.Formatter(cfg.logging_msgFormat))
loghdl.setLevel(cfg.logging_logLevel)
_log.addHandler(loghdl)
_log.disabled = False
return _log
except Exception as e:
print("[%s] Unable to initialize logging. Reason: %s" % (APP, e))
return None
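# Signal handler: set the termination flag once and stop the HTTP server; shutdown() is dispatched on a separate thread.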
def terminate(sigNo, _):
global doTerminate
global myServer
global httpIsRunning
if doTerminate:
return
doTerminate = True
ctx.log.info(f"[{APP}] Terminating with Signal {sigNo} {sigs[sigNo]}")
if httpIsRunning:
Thread(target=myServer.shutdown).start()
def loadAirTags():
global ctx
airTagDir = ctx.cfg.general_airTagDirectory
airTagSuffix = ctx.cfg.general_airTagSuffix
if not exists(airTagDir):
ctx.log.info(
f"[loadAirTags] Airtags Directory '{airTagDir}' does not exist, creating it. This will be used to store Airtag key information.")
makedirs(airTagDir)
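    # Load every stored AirTag definition (one JSON file per tag) into the shared context, keyed by tag id.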
tags = glob.glob(join(airTagDir, '*' + airTagSuffix))
for t in tags:
airtag = AirTag(ctx, jsonFile=t)
ctx.airtags[airtag.id] = airtag
class FindMyServer(BaseHTTPRequestHandler):
    '''Maps a file extension to its (Content-Type, encode-as-UTF-8 flag) pair.'''
contentTypeDct = {'.html': ["text/html", True],
'.js': ["application/javascript", True],
'.css': ["text/css", True],
'.png': ["image/png", False],
}
def do_GET(self):
if self.path.startswith('/api'):
api = API(ctx)
query_components = parse_qs(urlparse(self.path).query)
cmd = query_components["command"]
result = api.call(cmd[0], params=query_components)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(result.encode('UTF-8'))
else:
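            # Static file serving: map the request path onto the www directory and choose the Content-Type from the extension table above.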
file = "/index.html" if self.path == "/" else self.path
file = join('www', file[1:])
ext = splitext(file)[1]
ct = self.contentTypeDct[ext] if ext in self.contentTypeDct.keys() else None
if exists(file) and ct is not None:
contentType = ct[0]
encode = ct[1]
self.send_response(200)
self.send_header("Content-type", contentType)
self.end_headers()
with open(file, 'r' if encode else 'rb') as f:
data = f.read()
self.wfile.write(data.encode('UTF-8') if encode else data)
else:
self.send_response(404)
self.end_headers()
if __name__ == '__main__':
doTerminate = False
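    # Default configuration: each option maps to a (type, default) pair, grouped by section, and is merged into Config via addScope().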
initialConfig = {
"general": {
"httpHost": ['String', '0.0.0.0'],
"httpPort": ['Integer', 8008],
"httpFiles": ['String', 'www'],
"anisetteHost": ['String', 'http://192.168.2.15'],
"anisettePort": ['Integer', 6969],
"airTagDirectory": ['String', 'airtags'],
"airTagSuffix": ['String', '.json'],
"history": ["Integer", 30],
},
"logging": {
"logFile": ["String", "/tmp/findMyGUI.log"],
"maxFilesize": ["Integer", 1000000],
"msgFormat": ["String", "%(asctime)s, %(levelname)s, %(module)s {%(process)d}, %(lineno)d, %(message)s"],
"logLevel": ["Integer", 10],
"stdout": ["Boolean", True],
},
"appleId": {
"appleId": ["String", ''],
"password": ["String", ''],
"trustedDevice": ["Boolean", False],
}
}
path = join(CONFIG_DIR, CONFIG_FILE)
if not (exists(path)):
print(f"[{APP}] No config file {CONFIG_FILE} found at {CONFIG_DIR}, using defaults")
cfg = Config(path)
cfg.addScope(initialConfig)
runAsDaemon = False
if len(sys.argv) > 1:
todo = sys.argv[1]
if todo in ['start', 'stop', 'restart', 'status']:
runAsDaemon = True
pidFile = cfg.general_pidFile
logFile = cfg.logging_logFile | d = Daemon(pidFile, APP, logFile) | 4 | 2023-12-16 12:39:52+00:00 | 12k |
zhcui/polar_preview | polar/lang_firsov/ulf.py | [
{
"identifier": "grad_ulf",
"path": "polar/lang_firsov/grad_ulf.py",
"snippet": "def get_grad_lf(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\ndef get_grad_lf_full(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None):\ndef get_grad_glf(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\ndef get_grad_glf_2(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\ndef get_grad_gglf(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\n G = h_ep - einsum('x, xp -> xp', w_p, lams)\n H2 = h2 * fac\n G = h_ep * fac1"
},
{
"identifier": "thermal_average",
"path": "polar/lang_firsov/thermal_average.py",
"snippet": "def get_str(nph, compact=True):\ndef count(string, num_bra=0):\ndef get_counts(nph, ph_str=None):\ndef comm(A, B):\ndef bch_h1_exact(h1, lams, order, H1_ref=None):\ndef bch_h1(h1, lams, order, H1_ref=None):\ndef trace_A1(lams):\ndef bch_h1_exp_ref(h1, lams):\ndef bch_h1_exp(h1, lams):\n def A_func(h):\ndef comm_h2(h2, B):\ndef bch_h2_exact(h2, lams, order, H2_ref=None):\ndef bch_h2(h2, lams, order, H2_ref=None):\ndef trace_A2(lams):\ndef bch_h2_exp_ref(h2, lams):\ndef bch_h2_exp(h2, lams):\n def A_func(h):\n H1 = np.array(h1, copy=True)\n H1 = np.array(h1, copy=True)\n H1 = np.array(h1, copy=True)\n I = np.eye(nao) * 0.5\n H1 = np.dot(op, h1.ravel()).reshape(nao, nao)\n H1 = _expm_multiply_simple(A_op, h1.ravel(), traceA=tr)\n H1 = H1.reshape(nao, nao)\n H2 = np.array(h2, copy=True)\n H2 = np.array(h2, copy=True)\n H2 = np.array(h2, copy=True)\n I = np.eye(nao)\n H2 = np.dot(op, h2.ravel()).reshape(nao, nao, nao, nao)\n H2 = _expm_multiply_simple(A_op, h2.ravel(), traceA=tr)\n H2 = H2.reshape(nao, nao, nao, nao)\n H1 = h1 * factor\n H1_2 = bch_h1(h1, lams, order=6, H1_ref=H1)\n H1_2 = bch_h1(h1, lams, order=10)\n H2 = h2 * factor\n H2_2 = bch_h2(h2, lams, order=10, H2_ref=H2)"
},
{
"identifier": "fc_factor",
"path": "polar/fci/fci.py",
"snippet": "def fc_factor(n, m, l):\n \"\"\"\n Get the Franck-Condon factors, <n|exp(-l(b-b+))|m>\n https://physics.stackexchange.com/questions/553225/representation-of-the-displacement-operator-in-number-basis\n \"\"\"\n lsq = l * l\n res = np.exp(lsq * (-0.5))\n if n >= m:\n res *= l ** (n-m)\n res *= np.sqrt(fac(m) / fac(n))\n res *= lg(lsq, m, n-m)\n else:\n res *= l ** (m-n)\n res *= (np.sqrt(fac(n) / fac(m)) * ((-1)**(m-n)))\n res *= lg(lsq, n, m-n)\n return res"
},
{
"identifier": "GLangFirsov",
"path": "polar/lang_firsov/lang_firsov.py",
"snippet": "class GLangFirsov(LangFirsov):\n def __init__(self, mol, h_ep, w_p, h0=None, h1=None, h2=None, ovlp=None, nelec=None,\n spin=0, params=None, uniform=False, lams_only=False, zs_only=False, aosym='s1',\n verbose=4):\n \"\"\"\n Generalized LF.\n \"\"\"\n if mol is None:\n self.mol = gto.Mole(verbose=verbose)\n self.mol.build(dump_input=False)\n else:\n self.mol = mol\n self.verbose = verbose\n self.max_memory = self.mol.max_memory\n self.stdout = self.mol.stdout\n\n if h0 is None:\n self.h0 = self.mol.energy_nuc()\n else:\n self.h0 = h0\n\n if h1 is None:\n self.h1 = hf.get_hcore(self.mol)\n else:\n self.h1 = h1\n\n if ovlp is None:\n if mol is None:\n self.ovlp = np.eye(self.h1.shape[-1])\n else:\n self.ovlp = hf.get_ovlp(self.mol)\n else:\n self.ovlp = ovlp\n\n if h2 is None:\n if mol is None:\n self.h2 = h2\n else:\n self.h2 = self.mol.intor('int2e', aosym=aosym)\n else:\n self.h2 = h2\n\n self.h_ep = h_ep\n self.w_p = w_p\n\n if nelec is None:\n self.nelec = self.mol.nelectron\n self.mol.tot_electrons = lambda *args: self.nelec\n else:\n self.nelec = nelec\n self.mol.tot_electrons = lambda *args: self.nelec\n self.mol.nelectron = nelec\n self.mol.incore_anyway = True\n\n if self.nelec == 1:\n self.spin = self.mol.spin = 1\n else:\n self.spin = self.mol.spin = spin\n self.nelec_a = (self.nelec + self.spin) // 2\n self.nelec_b = (self.nelec - self.spin) // 2\n assert self.nelec_a + self.nelec_b == self.nelec\n\n self.nmode = len(self.w_p)\n self.nao = self.h1.shape[-1]\n\n if self.h_ep.shape == (self.nmode, self.nao):\n self.lf_type = 'glf'\n elif self.h_ep.shape == (self.nmode, self.nao, self.nao):\n self.lf_type = 'glf'\n else:\n raise ValueError(\"h_ep shape %s is not supported.\"%(str(self.h_ep.shape)))\n\n self.uniform = uniform\n self.lams_only = lams_only\n self.zs_only = zs_only\n\n if params is None:\n self.params = self.get_init_params()\n else:\n self.params = params\n assert len(self.params) == self.nparam\n\n self.params_full = np.zeros(self.nparam_full)\n self.params_full[-self.nparam:] = self.params\n\n # results:\n self.chkfile = None\n self.params_opt = None\n self.params_full_opt = None\n self.e_tot = None\n self.e_hf = None\n self.e_mp1 = None\n self.e_mp2 = None\n self.e_mp3 = None\n self.e_mp4 = None\n self.mo_energy = None\n self.mo_coeff = None\n self.mo_occ = None\n\n @property\n def nlams(self):\n if self.zs_only:\n nlams = 0\n elif self.uniform:\n nlams = 1\n else:\n nlams = self.nmode * self.nao\n return nlams\n\n def pack_params(self, lams, zs):\n if self.lams_only:\n if self.uniform:\n params = np.array((lams[0, 0],))\n else:\n params = np.hstack((lams.ravel(),))\n elif self.zs_only:\n if self.uniform:\n params = np.array((zs[0],))\n else:\n params = np.hstack((zs.ravel(),))\n else:\n if self.uniform:\n params = np.array((lams[0, 0], zs[0]))\n else:\n params = np.hstack((lams.ravel(), zs.ravel()))\n return params\n\n def get_init_params(self, scale=0.1):\n h_ep = self.h_ep\n w_p = self.w_p\n if self.zs_only:\n lams = np.array([])\n elif self.uniform:\n val = (np.random.random() - 0.5) * (np.max(h_ep) / np.max(w_p) * scale)\n lams = np.zeros((self.nmode, self.nao))\n lams[range(self.nmode), range(self.nao)] = val\n else:\n lams = (np.random.random(self.nlams) - 0.5) * (np.max(h_ep) / np.max(w_p) * scale)\n lams = lams.reshape(self.nmode, self.nao)\n\n if self.lams_only:\n zs = np.array([])\n elif self.zs_only:\n zs = np.random.random(self.nzs)\n else:\n dm0 = self.get_dm0()\n fc1 = self.get_fc1(lams=lams)\n if self.h_ep.shape == (self.nmode, 
self.nao):\n zs = np.einsum(\"yp, pp -> y\", lams, dm0) - \\\n np.einsum(\"yp, pp, pp -> y\", h_ep, fc1, dm0) / w_p\n else:\n zs = np.einsum(\"yp, pp -> y\", lams, dm0) - \\\n np.einsum(\"ypq, pq, qp -> y\", h_ep, fc1, dm0) / w_p\n\n params = np.append(lams.ravel(), zs)\n\n if self.uniform:\n if self.lams_only or self.zs_only:\n params = params[[-1]]\n else:\n params = params[[0, self.nlams]]\n\n return params\n\n def get_fc1(self, lams=None):\n if lams is None:\n lams, zs = self.get_lams_zs(opt=True)\n diff = lib.direct_sum('xq - xp -> xpq', lams, lams)\n f00 = fc_factor(0, 0, diff)\n fc1 = np.prod(f00, axis=0)\n return fc1\n\n def make_rdm1p(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):\n \"\"\"\n Phonon part of rdm1.\n rho_xy = <LF | b^{\\dag}_y b_x |LF>\n \"\"\"\n if lams is None or zs is None:\n lams, zs = self.get_lams_zs(opt=True)\n rdm1 = self.make_rdm1(mo_coeff=mo_coeff, mo_occ=mo_occ)\n nao = self.nao\n rdm1_diag = rdm1[range(nao), range(nao)]\n rho = np.einsum(\"y, x -> xy\", zs, zs)\n\n tmp = np.einsum(\"xp, p -> x\", lams, rdm1_diag)\n tmp = np.einsum(\"y, x -> xy\", zs, tmp)\n rho -= tmp\n rho -= tmp.conj().T\n\n rho += np.einsum(\"yp, xp, p -> xy\", lams, lams, rdm1_diag, optimize=True)\n tmp = np.einsum(\"p, q -> pq\", rdm1_diag, rdm1_diag)\n tmp -= 0.5 * np.einsum(\"qp, pq -> pq\", rdm1, rdm1)\n rho += np.einsum(\"yp, xp, pq -> xy\", lams, lams, tmp, optimize=True)\n return rho\n\n def make_rdm1p_linear(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):\n \"\"\"\n Phonon linear part of rdm1.\n rho_x = <LF | b_x |LF>\n \"\"\"\n if lams is None or zs is None:\n lams, zs = self.get_lams_zs(opt=True)\n rdm1 = self.make_rdm1(mo_coeff=mo_coeff, mo_occ=mo_occ)\n nao = self.nao\n rdm1_diag = rdm1[range(nao), range(nao)]\n rho = zs - np.einsum(\"xp, p -> x\", lams, rdm1_diag)\n return rho\n\n get_lf_ham = get_glf_ham\n\n get_grad = grad.get_grad_glf"
},
{
"identifier": "GGLangFirsov",
"path": "polar/lang_firsov/lang_firsov.py",
"snippet": "class GGLangFirsov(GLangFirsov):\n \"\"\"\n Most generalized LF.\n lams has shape (nmode, nao, nao).\n \"\"\"\n @property\n def nlams(self):\n if self.zs_only:\n nlams = 0\n elif self.uniform:\n nlams = 1\n else:\n nlams = self.nmode * self.nao * (self.nao+1) // 2\n return nlams\n\n def pack_params(self, lams, zs):\n if self.lams_only:\n if self.uniform:\n params = np.array((lams[0, 0, 0],))\n else:\n params = np.hstack((lib.pack_tril(lams).ravel(),))\n elif self.zs_only:\n if self.uniform:\n params = np.array((zs[0],))\n else:\n params = np.hstack((zs.ravel(),))\n else:\n if self.uniform:\n params = np.array((lams[0, 0, 0], zs[0]))\n else:\n params = np.hstack((lib.pack_tril(lams).ravel(), zs.ravel()))\n return params\n\n def get_init_params(self, scale=0.1):\n h_ep = self.h_ep\n w_p = self.w_p\n if self.zs_only:\n lams = np.array([])\n elif self.uniform:\n val = (np.random.random() - 0.5) * (np.max(h_ep) / np.max(w_p) * scale)\n lams = np.zeros((self.nmode, self.nao, self.nao))\n lams[range(self.nmode), range(self.nao), range(self.nao)] = val\n else:\n lams = (np.random.random(self.nlams) - 0.5) * (np.max(h_ep) / np.max(w_p) * scale)\n lams = lib.unpack_tril(lams.reshape(self.nmode, -1))\n\n dm0 = self.get_dm0()\n if self.lams_only:\n zs = np.array([])\n elif self.zs_only:\n zs = np.random.random(self.nzs)\n else:\n #lams_diag = lams[:, range(self.nao), range(self.nao)]\n #diff = lib.direct_sum('xq - xp -> xpq', lams_diag, lams_diag)\n #f00 = fc_factor(0, 0, diff)\n #fc1 = np.prod(f00, axis=0)\n #zs = np.einsum(\"yp, pp -> y\", lams_diag, dm0) - \\\n # np.einsum(\"ypq, pq, qp -> y\", h_ep, fc1, dm0) / w_p\n fc1 = self.get_fc1(lams=lams)\n zs = np.einsum(\"ypq, qp -> y\", lams, dm0) - \\\n np.einsum(\"ypq, pq, qp -> y\", h_ep, fc1, dm0) / w_p\n\n if self.zs_only:\n params = zs\n else:\n params = np.append(lib.pack_tril(lams).ravel(), zs)\n\n if self.uniform:\n if self.lams_only or self.zs_only:\n params = params[[-1]]\n else:\n params = params[[0, self.nlams]]\n return params\n\n def get_fc1(self, lams=None):\n if lams is None:\n lams, zs = self.get_lams_zs(opt=True)\n h1 = np.eye(self.nao)\n fc1 = ta.bch_h1_exp(h1, lams)\n return fc1\n\n def make_rdm1p(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):\n \"\"\"\n Phonon part of rdm1.\n rho_xy = <LF | b^{\\dag}_y b_x |LF>\n \"\"\"\n raise NotImplementedError\n\n def make_rdm1p_linear(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):\n \"\"\"\n Phonon linear part of rdm1.\n rho_xy = <LF | b^{\\dag}_y b_x |LF>\n \"\"\"\n raise NotImplementedError\n\n get_lf_ham = get_gglf_ham\n\n get_grad = grad.get_grad_gglf"
}
] | from functools import partial
from scipy import linalg as la
from scipy import optimize as opt
from pyscf import gto, scf, ao2mo, lib
from pyscf.scf import hf, uhf
from pyscf.lib import logger
from polar.lang_firsov import grad_ulf as grad
from polar.lang_firsov import thermal_average as ta
from polar.fci.fci import fc_factor
from polar.lang_firsov.lang_firsov import GLangFirsov, GGLangFirsov
from pyscf.pbc.scf.addons import smearing_
from polar.lang_firsov import mp_glf
import numpy as np | 7,543 | mylf.e_hf = float(e_tot)
conv = mf.converged
mylf.mo_coeff = mf.mo_coeff = mo_coeff
mylf.mo_occ = mf.mo_occ = mo_occ
mylf.mo_energy = mf.mo_energy = mo_energy
if mp4 or mp3 or mp2:
logger.info(mylf, "LF-MP2 start, nph = %d", nph)
ovlp_g = la.block_diag(ovlp, ovlp)
# ZHC FIXME should we use h1 or H1?
hcore_g = la.block_diag(H1, H1)
#hcore_g = la.block_diag(h1, h1)
mf = mylf._scf = mf.to_ghf()
mf.get_ovlp = lambda *args: ovlp_g
mf.get_hcore = lambda *args: hcore_g
mf._eri = H2
if mp4:
e_mp1, e_mp2, e_mp3, e_mp4 = mp_glf.get_e_mp4(mylf, lams=lams, zs=zs, nph=nph)
e_tot += e_mp1
e_tot += e_mp2
e_tot += e_mp3
e_tot += e_mp4
mylf.e_mp1 = e_mp1
mylf.e_mp2 = e_mp2
mylf.e_mp3 = e_mp3
mylf.e_mp4 = e_mp4
logger.info(mylf, "e_mp1 %15.8f", e_mp1)
logger.info(mylf, "e_mp2 %15.8f", e_mp2)
logger.info(mylf, "e_mp3 %15.8f", e_mp3)
logger.info(mylf, "e_mp4 %15.8f", e_mp4)
elif mp2:
e_mp2 = mp_glf.get_e_mp2(mylf, lams=lams, zs=zs, nph=nph)
e_tot += e_mp2
mylf.e_mp2 = e_mp2
logger.info(mylf, "e_mp2 %15.8f", mylf.e_mp2)
return e_tot, rdm1
class UGLangFirsov(GLangFirsov):
@property
def nkappa(self):
nocc_a = self.nelec_a
nvir_a = self.nao - nocc_a
nk_a = nvir_a * nocc_a
nocc_b = self.nelec_b
nvir_b = self.nao - nocc_b
nk_b = nvir_b * nocc_b
nparam = nk_a + nk_b
return nparam
def unpack_params_full(self, params, uniform=None):
nocc_a = self.nelec_a
nvir_a = self.nao - nocc_a
nk_a = nvir_a * nocc_a
nocc_b = self.nelec_b
nvir_b = self.nao - nocc_b
nk_b = nvir_b * nocc_b
kappa_a = params[:nk_a]
kappa_b = params[nk_a:(nk_a+nk_b)]
lams, zs = self.unpack_params(params[(nk_a+nk_b):])
return (kappa_a, kappa_b), lams, zs
def make_rdm1(self, mo_coeff=None, mo_occ=None):
if mo_occ is None:
mo_occ = self.mo_occ
if mo_coeff is None:
mo_coeff = self.mo_coeff
dm_a = np.dot(mo_coeff[0] * mo_occ[0], mo_coeff[0].conj().T)
dm_b = np.dot(mo_coeff[1] * mo_occ[1], mo_coeff[1].conj().T)
dm = np.asarray((dm_a, dm_b))
return dm
def make_rdm1p(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):
"""
Phonon part of rdm1.
rho_xy = <LF | b^{\dag}_y b_x |LF>
"""
if lams is None or zs is None:
lams, zs = self.get_lams_zs(opt=True)
rdm1 = self.make_rdm1(mo_coeff=mo_coeff, mo_occ=mo_occ)
nao = self.nao
rdm1_diag = rdm1[:, range(nao), range(nao)]
rdm1_diag_sum = np.sum(rdm1_diag, axis=0)
rho = np.einsum("y, x -> xy", zs, zs)
tmp = np.einsum("xp, p -> x", lams, rdm1_diag_sum)
tmp = np.einsum("y, x -> xy", zs, tmp)
rho -= tmp
rho -= tmp.conj().T
rho += np.einsum("yp, xp, p -> xy", lams, lams, rdm1_diag_sum, optimize=True)
tmp = np.einsum("p, q -> pq", rdm1_diag_sum, rdm1_diag_sum)
tmp -= np.einsum("sqp, spq -> pq", rdm1, rdm1)
rho += np.einsum("yp, xp, pq -> xy", lams, lams, tmp, optimize=True)
return rho
def make_rdm1p_linear(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):
"""
Phonon linear part of rdm1.
rho_x = <LF | b_x |LF>
"""
if lams is None or zs is None:
lams, zs = self.get_lams_zs(opt=True)
rdm1 = self.make_rdm1(mo_coeff=mo_coeff, mo_occ=mo_occ)
nao = self.nao
rdm1_diag = rdm1[:, range(nao), range(nao)].sum(axis=0)
rho = zs - np.einsum("xp, p -> x", lams, rdm1_diag)
return rho
get_grad = grad.get_grad_glf
get_grad_full = grad.get_grad_lf_full
solve_lf_ham = solve_lf_ham
solve_lf_ham_full = solve_lf_ham_full
| #!/usr/bin/env python
"""
Unrestricted version of the variational Lang-Firsov method.
Authors:
Zhi-Hao Cui <[email protected]>
"""
einsum = partial(np.einsum, optimize=True)
# ****************************************************************************
# Variational Lang-Firsov
# ****************************************************************************
def solve_lf_ham(mylf, params=None, nelec=None, spin=None, mp2=False, mp3=False, mp4=False,
nph=9, verbose=False, scf_newton=False, beta=np.inf, dm0=None,
scf_max_cycle=50, fci=False):
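    # Build the Lang-Firsov-transformed Hamiltonian for the given variational parameters and solve it with a standard UHF mean-field calculation.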
H0, H1, H2, H_ep, w_p = mylf.get_lf_ham(params=params)
ovlp = mylf.get_ovlp()
nao = mylf.nao
h1 = mylf.get_h1()
if nelec is None:
nelec = mylf.nelec
if spin is None:
spin = mylf.spin
if params is None:
params = mylf.params
lams, zs = mylf.unpack_params(params)
if H2 is not None:
mf = uhf.UHF(mylf.mol)
mf.energy_nuc = lambda *args: H0
mf.get_hcore = lambda *args: H1
mf.get_ovlp = lambda *args: ovlp
mf._eri = H2
mf.direct_scf = False
mf.max_cycle = scf_max_cycle
mf.conv_tol = mylf.conv_tol * 0.1
if scf_newton:
mf = mf.newton()
if beta < np.inf:
mf = smearing_(mf, sigma=1.0/beta, method='fermi')
e_tot = mf.kernel(dm0=dm0)
rdm1 = mf.make_rdm1()
mylf._scf = mf
mylf.mo_energy = mf.mo_energy
mylf.mo_coeff = mf.mo_coeff
mylf.mo_occ = mf.mo_occ
mylf.e_hf = float(e_tot)
conv = mf.converged
if fci:
raise NotImplementedError
else:
raise NotImplementedError
mylf.e_tot = e_tot
return e_tot, rdm1
def solve_lf_ham_full(mylf, params=None, nelec=None, mp2=False, mp3=False, mp4=False,
nph=9, verbose=False, scf_newton=False, beta=np.inf, dm0=None,
scf_max_cycle=50, mo_coeff=None, mo_occ=None, canonicalization=True):
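    # Variant of solve_lf_ham where the parameter vector also carries orbital-rotation (kappa) amplitudes: no SCF is run; the energy is evaluated for the determinant obtained by rotating the reference orbitals.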
if params is None:
params = mylf.params
(kappa_a, kappa_b), lams, zs = mylf.unpack_params_full(params)
params_p = mylf.pack_params(lams, zs)
H0, H1, H2, H_ep, w_p = mylf.get_lf_ham(params=params_p)
ovlp = mylf.get_ovlp()
nao = mylf.nao
h1 = mylf.get_h1()
if nelec is None:
nelec = mylf.nelec
if H2 is not None:
mf = uhf.UHF(mylf.mol)
mf.energy_nuc = lambda *args: H0
mf.get_hcore = lambda *args: H1
mf.get_ovlp = lambda *args: ovlp
# ZHC FIXME NOTE the transformed H2 may not have the 4-fold symmetry,
# it is only 2-fold. pqrs = rspq
#mf._eri = ao2mo.restore(4, H2, nao)
mf._eri = H2
mf.direct_scf = False
mf.max_cycle = scf_max_cycle
mf.conv_tol = mylf.conv_tol * 0.1
nmo = len(mo_occ[0])
nocc_a = mylf.nelec_a
nocc_b = mylf.nelec_b
nvir_a = nmo - nocc_a
nvir_b = nmo - nocc_b
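        # Rotate the reference MO coefficients of each spin channel by exp(kappa), with kappa unpacked from its packed occupied-virtual form.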
dr_a = hf.unpack_uniq_var(kappa_a, mo_occ[0])
mo_coeff_a = np.dot(mo_coeff[0], la.expm(dr_a))
dr_b = hf.unpack_uniq_var(kappa_b, mo_occ[1])
mo_coeff_b = np.dot(mo_coeff[1], la.expm(dr_b))
mo_coeff = np.asarray([mo_coeff_a, mo_coeff_b])
rdm1 = mf.make_rdm1(mo_coeff, mo_occ)
e_tot = mf.energy_elec(dm=rdm1)[0] + mf.energy_nuc()
fock = mf.get_fock(dm=rdm1)
if canonicalization:
print("-" * 79)
mo_energy, mo_coeff = mf.canonicalize(mo_coeff, mo_occ, fock)
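            # Report HOMO/LUMO energies and gaps for both spin channels of the canonicalized orbitals.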
homo_a = lumo_a = homo_b = lumo_b = None
mo_e_occ_a = mo_energy[0][mo_occ[0] >= 0.5]
mo_e_vir_a = mo_energy[0][mo_occ[0] < 0.5]
if len(mo_e_occ_a) > 0:
homo_a = mo_e_occ_a.max()
if len(mo_e_vir_a) > 0:
lumo_a = mo_e_vir_a.min()
if homo_a is not None:
print ('HOMO (a) = %15.8g'%(homo_a))
if lumo_a is not None:
print ('LUMO (a) = %15.8g'%(lumo_a))
if homo_a is not None:
print ("gap (a) = %15.8g"%(lumo_a - homo_a))
if (lumo_a is not None) and (homo_a is not None) and (homo_a > lumo_a):
print ('WARN: HOMO (a) %s > LUMO (a) %s was found in the canonicalized orbitals.'
%(homo_a, lumo_a))
print ("mo_energy (a):\n%s"%mo_energy[0])
print("-" * 79)
mo_e_occ_b = mo_energy[1][mo_occ[1] >= 0.5]
mo_e_vir_b = mo_energy[1][mo_occ[1] < 0.5]
if len(mo_e_occ_b) > 0:
homo_b = mo_e_occ_b.max()
if len(mo_e_vir_b) > 0:
lumo_b = mo_e_vir_b.min()
if homo_b is not None:
print ('HOMO (b) = %15.8g'%(homo_b))
if lumo_b is not None:
print ('LUMO (b) = %15.8g'%(lumo_b))
if homo_b is not None:
print ("gap (b) = %15.8g"%(lumo_b - homo_b))
if (lumo_b is not None) and (homo_b is not None) and (homo_b > lumo_b):
print ('WARN: HOMO (b) %s > LUMO (b) %s was found in the canonicalized orbitals.'
%(homo_b, lumo_b))
print ("mo_energy (b):\n%s"%mo_energy[1])
grad = mf.get_grad(mo_coeff, mo_occ, fock)
grad_norm = la.norm(grad)
print("-" * 79)
print ("|g| = %15.8g" % grad_norm)
print("-" * 79)
else:
mo_energy = einsum("spm, spq, sqm -> sm", mo_coeff.conj(), fock, mo_coeff)
mylf._scf = mf
mylf.e_hf = float(e_tot)
conv = mf.converged
mylf.mo_coeff = mf.mo_coeff = mo_coeff
mylf.mo_occ = mf.mo_occ = mo_occ
mylf.mo_energy = mf.mo_energy = mo_energy
if mp4 or mp3 or mp2:
logger.info(mylf, "LF-MP2 start, nph = %d", nph)
ovlp_g = la.block_diag(ovlp, ovlp)
# ZHC FIXME should we use h1 or H1?
hcore_g = la.block_diag(H1, H1)
#hcore_g = la.block_diag(h1, h1)
mf = mylf._scf = mf.to_ghf()
mf.get_ovlp = lambda *args: ovlp_g
mf.get_hcore = lambda *args: hcore_g
mf._eri = H2
if mp4:
e_mp1, e_mp2, e_mp3, e_mp4 = mp_glf.get_e_mp4(mylf, lams=lams, zs=zs, nph=nph)
e_tot += e_mp1
e_tot += e_mp2
e_tot += e_mp3
e_tot += e_mp4
mylf.e_mp1 = e_mp1
mylf.e_mp2 = e_mp2
mylf.e_mp3 = e_mp3
mylf.e_mp4 = e_mp4
logger.info(mylf, "e_mp1 %15.8f", e_mp1)
logger.info(mylf, "e_mp2 %15.8f", e_mp2)
logger.info(mylf, "e_mp3 %15.8f", e_mp3)
logger.info(mylf, "e_mp4 %15.8f", e_mp4)
elif mp2:
e_mp2 = mp_glf.get_e_mp2(mylf, lams=lams, zs=zs, nph=nph)
e_tot += e_mp2
mylf.e_mp2 = e_mp2
logger.info(mylf, "e_mp2 %15.8f", mylf.e_mp2)
return e_tot, rdm1
class UGLangFirsov(GLangFirsov):
@property
def nkappa(self):
nocc_a = self.nelec_a
nvir_a = self.nao - nocc_a
nk_a = nvir_a * nocc_a
nocc_b = self.nelec_b
nvir_b = self.nao - nocc_b
nk_b = nvir_b * nocc_b
nparam = nk_a + nk_b
return nparam
def unpack_params_full(self, params, uniform=None):
nocc_a = self.nelec_a
nvir_a = self.nao - nocc_a
nk_a = nvir_a * nocc_a
nocc_b = self.nelec_b
nvir_b = self.nao - nocc_b
nk_b = nvir_b * nocc_b
kappa_a = params[:nk_a]
kappa_b = params[nk_a:(nk_a+nk_b)]
lams, zs = self.unpack_params(params[(nk_a+nk_b):])
return (kappa_a, kappa_b), lams, zs
def make_rdm1(self, mo_coeff=None, mo_occ=None):
if mo_occ is None:
mo_occ = self.mo_occ
if mo_coeff is None:
mo_coeff = self.mo_coeff
dm_a = np.dot(mo_coeff[0] * mo_occ[0], mo_coeff[0].conj().T)
dm_b = np.dot(mo_coeff[1] * mo_occ[1], mo_coeff[1].conj().T)
dm = np.asarray((dm_a, dm_b))
return dm
def make_rdm1p(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):
"""
Phonon part of rdm1.
rho_xy = <LF | b^{\dag}_y b_x |LF>
"""
if lams is None or zs is None:
lams, zs = self.get_lams_zs(opt=True)
rdm1 = self.make_rdm1(mo_coeff=mo_coeff, mo_occ=mo_occ)
nao = self.nao
rdm1_diag = rdm1[:, range(nao), range(nao)]
rdm1_diag_sum = np.sum(rdm1_diag, axis=0)
rho = np.einsum("y, x -> xy", zs, zs)
tmp = np.einsum("xp, p -> x", lams, rdm1_diag_sum)
tmp = np.einsum("y, x -> xy", zs, tmp)
rho -= tmp
rho -= tmp.conj().T
rho += np.einsum("yp, xp, p -> xy", lams, lams, rdm1_diag_sum, optimize=True)
tmp = np.einsum("p, q -> pq", rdm1_diag_sum, rdm1_diag_sum)
tmp -= np.einsum("sqp, spq -> pq", rdm1, rdm1)
rho += np.einsum("yp, xp, pq -> xy", lams, lams, tmp, optimize=True)
return rho
def make_rdm1p_linear(self, mo_coeff=None, mo_occ=None, lams=None, zs=None):
"""
Phonon linear part of rdm1.
rho_x = <LF | b_x |LF>
"""
if lams is None or zs is None:
lams, zs = self.get_lams_zs(opt=True)
rdm1 = self.make_rdm1(mo_coeff=mo_coeff, mo_occ=mo_occ)
nao = self.nao
rdm1_diag = rdm1[:, range(nao), range(nao)].sum(axis=0)
rho = zs - np.einsum("xp, p -> x", lams, rdm1_diag)
return rho
get_grad = grad.get_grad_glf
get_grad_full = grad.get_grad_lf_full
solve_lf_ham = solve_lf_ham
solve_lf_ham_full = solve_lf_ham_full
| class UGGLangFirsov(GGLangFirsov, UGLangFirsov): | 4 | 2023-12-18 07:39:51+00:00 | 12k |
YaoFANGUK/video-subtitle-remover | backend/scenedetect/backends/opencv.py | [
{
"identifier": "FrameTimecode",
"path": "backend/scenedetect/frame_timecode.py",
"snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n 
\"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' 
in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n 
type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num"
},
{
"identifier": "MAX_FPS_DELTA",
"path": "backend/scenedetect/frame_timecode.py",
"snippet": "MAX_FPS_DELTA: float = 1.0 / 100000"
},
{
"identifier": "get_file_name",
"path": "backend/scenedetect/platform.py",
"snippet": "def get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\n \"\"\"Return the file name that `file_path` refers to, optionally removing the extension.\n\n If `include_extension` is False, the result will always be a str.\n\n E.g. /tmp/foo.bar -> foo\"\"\"\n file_name = os.path.basename(file_path)\n if not include_extension:\n file_name = str(file_name)\n last_dot_pos = file_name.rfind('.')\n if last_dot_pos >= 0:\n file_name = file_name[:last_dot_pos]\n return file_name"
},
{
"identifier": "VideoStream",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. 
if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "SeekError",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class SeekError(Exception):\n \"\"\"Either an unrecoverable error happened while attempting to seek, or the underlying\n stream is not seekable (additional information will be provided when possible).\n\n The stream is guaranteed to be left in a valid state, but the position may be reset.\"\"\""
},
{
"identifier": "VideoOpenFailure",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class VideoOpenFailure(Exception):\n \"\"\"Raised by a backend if opening a video fails.\"\"\"\n\n # pylint: disable=useless-super-delegation\n def __init__(self, message: str = \"Unknown backend error.\"):\n \"\"\"\n Arguments:\n message: Additional context the backend can provide for the open failure.\n \"\"\"\n super().__init__(message)\n\n # pylint: enable=useless-super-delegation"
},
{
"identifier": "FrameRateUnavailable",
"path": "backend/scenedetect/video_stream.py",
"snippet": "class FrameRateUnavailable(VideoOpenFailure):\n \"\"\"Exception instance to provide consistent error messaging across backends when the video frame\n rate is unavailable or cannot be calculated. Subclass of VideoOpenFailure.\"\"\"\n\n def __init__(self):\n super().__init__('Unable to obtain video framerate! Specify `framerate` manually, or'\n ' re-encode/re-mux the video and try again.')"
}
] | from logging import getLogger
from typing import AnyStr, Tuple, Union, Optional
from numpy import ndarray
from backend.scenedetect.frame_timecode import FrameTimecode, MAX_FPS_DELTA
from backend.scenedetect.platform import get_file_name
from backend.scenedetect.video_stream import VideoStream, SeekError, VideoOpenFailure, FrameRateUnavailable
import math
import os.path
import cv2 | 7,345 |
NON_VIDEO_FILE_INPUT_IDENTIFIERS = (
IMAGE_SEQUENCE_IDENTIFIER, # image sequence
'://', # URL/network stream
' ! ', # gstreamer pipe
)
def _get_aspect_ratio(cap: cv2.VideoCapture, epsilon: float = 0.0001) -> float:
"""Display/pixel aspect ratio of the VideoCapture as a float (1.0 represents square pixels)."""
# Versions of OpenCV < 3.4.1 do not support this, so we fall back to 1.0.
    if 'CAP_PROP_SAR_NUM' not in dir(cv2):
return 1.0
num: float = cap.get(cv2.CAP_PROP_SAR_NUM)
den: float = cap.get(cv2.CAP_PROP_SAR_DEN)
    # If the numerator or denominator is close to zero, fall back to 1.0.
if abs(num) < epsilon or abs(den) < epsilon:
return 1.0
return num / den
class VideoStreamCv2(VideoStream):
"""OpenCV `cv2.VideoCapture` backend."""
def __init__(
self,
path: AnyStr = None,
framerate: Optional[float] = None,
max_decode_attempts: int = 5,
path_or_device: Union[bytes, str, int] = None,
):
"""Open a video file, image sequence, or network stream.
Arguments:
path: Path to the video. Can be a file, image sequence (`'folder/DSC_%04d.jpg'`),
or network stream.
framerate: If set, overrides the detected framerate.
max_decode_attempts: Number of attempts to continue decoding the video
after a frame fails to decode. This allows processing videos that
have a few corrupted frames or metadata (in which case accuracy
of detection algorithms may be lower). Once this limit is passed,
decoding will stop and emit an error.
path_or_device: [DEPRECATED] Specify `path` for files, image sequences, or
network streams/URLs. Use `VideoCaptureAdapter` for devices/pipes.
Raises:
OSError: file could not be found or access was denied
VideoOpenFailure: video could not be opened (may be corrupted)
ValueError: specified framerate is invalid
"""
super().__init__()
# TODO(v0.7): Replace with DeprecationWarning that `path_or_device` will be removed in v0.8.
if path_or_device is not None:
logger.error('path_or_device is deprecated, use path or VideoCaptureAdapter instead.')
path = path_or_device
if path is None:
raise ValueError('Path must be specified!')
if framerate is not None and framerate < MAX_FPS_DELTA:
raise ValueError('Specified framerate (%f) is invalid!' % framerate)
if max_decode_attempts < 0:
raise ValueError('Maximum decode attempts must be >= 0!')
self._path_or_device = path
self._is_device = isinstance(self._path_or_device, int)
# Initialized in _open_capture:
self._cap: Optional[
cv2.VideoCapture] = None # Reference to underlying cv2.VideoCapture object.
self._frame_rate: Optional[float] = None
# VideoCapture state
self._has_grabbed = False
self._max_decode_attempts = max_decode_attempts
self._decode_failures = 0
self._warning_displayed = False
self._open_capture(framerate)
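        # (Added illustration, not in the original source.) Typical construction per the
        # docstring above; both paths are hypothetical, and `framerate` is only required
        # when the input does not report one (see FrameRateUnavailable in the context):
        #   stream = VideoStreamCv2('clips/demo.mp4')
        #   stream = VideoStreamCv2('frames/DSC_%04d.jpg', framerate=30.0)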
#
# Backend-Specific Methods/Properties
#
@property
def capture(self) -> cv2.VideoCapture:
"""Returns reference to underlying VideoCapture object. Use with caution.
Prefer to use this property only to take ownership of the underlying cv2.VideoCapture object
backing this object. Seeking or using the read/grab methods through this property are
unsupported and will leave this object in an inconsistent state.
"""
assert self._cap
return self._cap
#
# VideoStream Methods/Properties
#
BACKEND_NAME = 'opencv'
"""Unique name used to identify this backend."""
@property
def frame_rate(self) -> float:
"""Framerate in frames/sec."""
assert self._frame_rate
return self._frame_rate
@property
def path(self) -> Union[bytes, str]:
"""Video or device path."""
if self._is_device:
assert isinstance(self._path_or_device, (int))
return "Device %d" % self._path_or_device
assert isinstance(self._path_or_device, (bytes, str))
return self._path_or_device
@property
def name(self) -> str:
"""Name of the video, without extension, or device."""
if self._is_device:
return self.path
| # -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# -------------------------------------------------------------------
# [ Site: https://scenedetect.com ]
# [ Docs: https://scenedetect.com/docs/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
#
# Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>.
# PySceneDetect is licensed under the BSD 3-Clause License; see the
# included LICENSE file, or visit one of the above pages for details.
#
""":class:`VideoStreamCv2` is backed by the OpenCV `VideoCapture` object. This is the default
backend. Works with video files, image sequences, and network streams/URLs.
For wrapping input devices or pipes, there is also :class:`VideoCaptureAdapter` which can be
constructed from an existing `cv2.VideoCapture`. This allows performing scene detection on inputs
which do not support seeking.
"""
logger = getLogger('pyscenedetect')
IMAGE_SEQUENCE_IDENTIFIER = '%'
NON_VIDEO_FILE_INPUT_IDENTIFIERS = (
IMAGE_SEQUENCE_IDENTIFIER, # image sequence
'://', # URL/network stream
' ! ', # gstreamer pipe
)
def _get_aspect_ratio(cap: cv2.VideoCapture, epsilon: float = 0.0001) -> float:
"""Display/pixel aspect ratio of the VideoCapture as a float (1.0 represents square pixels)."""
# Versions of OpenCV < 3.4.1 do not support this, so we fall back to 1.0.
    if 'CAP_PROP_SAR_NUM' not in dir(cv2):
return 1.0
num: float = cap.get(cv2.CAP_PROP_SAR_NUM)
den: float = cap.get(cv2.CAP_PROP_SAR_DEN)
    # If the numerator or denominator are close to zero, fall back to 1.0.
if abs(num) < epsilon or abs(den) < epsilon:
return 1.0
return num / den
class VideoStreamCv2(VideoStream):
"""OpenCV `cv2.VideoCapture` backend."""
def __init__(
self,
path: AnyStr = None,
framerate: Optional[float] = None,
max_decode_attempts: int = 5,
path_or_device: Union[bytes, str, int] = None,
):
"""Open a video file, image sequence, or network stream.
Arguments:
path: Path to the video. Can be a file, image sequence (`'folder/DSC_%04d.jpg'`),
or network stream.
framerate: If set, overrides the detected framerate.
max_decode_attempts: Number of attempts to continue decoding the video
after a frame fails to decode. This allows processing videos that
have a few corrupted frames or metadata (in which case accuracy
of detection algorithms may be lower). Once this limit is passed,
decoding will stop and emit an error.
path_or_device: [DEPRECATED] Specify `path` for files, image sequences, or
network streams/URLs. Use `VideoCaptureAdapter` for devices/pipes.
Raises:
OSError: file could not be found or access was denied
VideoOpenFailure: video could not be opened (may be corrupted)
ValueError: specified framerate is invalid
"""
super().__init__()
# TODO(v0.7): Replace with DeprecationWarning that `path_or_device` will be removed in v0.8.
if path_or_device is not None:
logger.error('path_or_device is deprecated, use path or VideoCaptureAdapter instead.')
path = path_or_device
if path is None:
raise ValueError('Path must be specified!')
if framerate is not None and framerate < MAX_FPS_DELTA:
raise ValueError('Specified framerate (%f) is invalid!' % framerate)
if max_decode_attempts < 0:
raise ValueError('Maximum decode attempts must be >= 0!')
self._path_or_device = path
self._is_device = isinstance(self._path_or_device, int)
# Initialized in _open_capture:
self._cap: Optional[
cv2.VideoCapture] = None # Reference to underlying cv2.VideoCapture object.
self._frame_rate: Optional[float] = None
# VideoCapture state
self._has_grabbed = False
self._max_decode_attempts = max_decode_attempts
self._decode_failures = 0
self._warning_displayed = False
self._open_capture(framerate)
#
# Backend-Specific Methods/Properties
#
@property
def capture(self) -> cv2.VideoCapture:
"""Returns reference to underlying VideoCapture object. Use with caution.
Prefer to use this property only to take ownership of the underlying cv2.VideoCapture object
backing this object. Seeking or using the read/grab methods through this property are
unsupported and will leave this object in an inconsistent state.
"""
assert self._cap
return self._cap
#
# VideoStream Methods/Properties
#
BACKEND_NAME = 'opencv'
"""Unique name used to identify this backend."""
@property
def frame_rate(self) -> float:
"""Framerate in frames/sec."""
assert self._frame_rate
return self._frame_rate
@property
def path(self) -> Union[bytes, str]:
"""Video or device path."""
if self._is_device:
assert isinstance(self._path_or_device, (int))
return "Device %d" % self._path_or_device
assert isinstance(self._path_or_device, (bytes, str))
return self._path_or_device
@property
def name(self) -> str:
"""Name of the video, without extension, or device."""
if self._is_device:
return self.path | file_name: str = get_file_name(self.path, include_extension=False) | 2 | 2023-10-25 02:50:01+00:00 | 12k |
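# Added illustration (not part of the original source): the seek() docstring in the context
# above uses a 0-based frame index, so reading the n-th frame with 1-based numbering looks
# like the sketch below. `stream` is any object implementing the VideoStream interface.
def read_nth_frame(stream, n: int):
    stream.seek(n - 1)    # seek() takes a 0-based target (seek(0) -> first frame)
    return stream.read()  # after read(), stream.frame_number == n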
Genesis-Embodied-AI/RoboGen | manipulation/sim.py | [
{
"identifier": "Panda",
"path": "manipulation/panda.py",
"snippet": "class Panda(Robot):\n def __init__(self, controllable_joints='right', slider=True, floating=False):\n self.slider = slider\n self.floating = floating\n if not floating:\n if not slider:\n right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints\n right_end_effector = 11 # Used to get the pose of the end effector\n right_gripper_indices = [9, 10] # Gripper actuated joints\n else:\n right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]\n right_end_effector = 15 # Used to get the pose of the end effector\n right_gripper_indices = [13, 14] # Gripper actuated joints\n \n else:\n right_arm_joint_indices = []\n right_end_effector = -1\n right_gripper_indices = [0, 1]\n\n super(Panda, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)\n\n def init(self, directory, id, np_random, fixed_base=False, use_suction=True):\n self.body = p.loadURDF(os.path.join(directory, 'franka_mobile', 'panda_suction_slider_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)\n\n for i in range(p.getNumJoints(self.body, physicsClientId=id)):\n print(p.getJointInfo(self.body, i, physicsClientId=id))\n link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')\n print(\"link_name: \", link_name)\n\n super(Panda, self).init(self.body, id, np_random)"
},
{
"identifier": "UR5",
"path": "manipulation/ur5.py",
"snippet": "class UR5(Robot):\n def __init__(self, controllable_joints='right', slider=True, floating=False):\n self.slider = slider\n self.floating = floating\n if not floating:\n if not slider:\n right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints\n right_end_effector = 11 # Used to get the pose of the end effector\n right_gripper_indices = [9, 10] # Gripper actuated joints\n else:\n right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]\n right_end_effector = 21 # Used to get the pose of the end effector\n right_gripper_indices = [21, 19] # Gripper actuated joints\n \n else:\n right_arm_joint_indices = []\n right_end_effector = -1\n right_gripper_indices = [0, 1]\n\n super(UR5, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)\n\n def init(self, directory, id, np_random, fixed_base=False, use_suction=True):\n self.body = p.loadURDF(os.path.join(directory, 'ur5', 'ur5_robotiq85_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)\n\n for i in range(p.getNumJoints(self.body, physicsClientId=id)):\n print(p.getJointInfo(self.body, i, physicsClientId=id))\n link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')\n print(\"link_name: \", link_name)\n\n all_joint_num = p.getNumJoints(self.body)\n all_joint_idx = list(range(all_joint_num))\n joint_idx = [j for j in all_joint_idx if self._is_not_fixed(j)]\n self.right_arm_joint_indices = joint_idx\n self.controllable_joint_indices = self.right_arm_joint_indices\n\n super(UR5, self).init(self.body, id, np_random)"
},
{
"identifier": "Sawyer",
"path": "manipulation/sawyer.py",
"snippet": "class Sawyer(Robot):\n def __init__(self, controllable_joints='right', slider=True, floating=False):\n self.slider = slider\n self.floating = floating\n if not floating:\n if not slider:\n right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints\n right_end_effector = 11 # Used to get the pose of the end effector\n right_gripper_indices = [9, 10] # Gripper actuated joints\n else:\n right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]\n right_end_effector = 26 # Used to get the pose of the end effector\n right_gripper_indices = [25, 23] # Gripper actuated joints\n \n else:\n right_arm_joint_indices = []\n right_end_effector = -1\n right_gripper_indices = [0, 1]\n\n super(Sawyer, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)\n\n def init(self, directory, id, np_random, fixed_base=False, use_suction=True):\n self.body = p.loadURDF(os.path.join(directory, 'sawyer', 'sawyer_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)\n\n for i in range(p.getNumJoints(self.body, physicsClientId=id)):\n print(p.getJointInfo(self.body, i, physicsClientId=id))\n link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')\n print(\"link_name: \", link_name)\n\n all_joint_num = p.getNumJoints(self.body)\n all_joint_idx = list(range(all_joint_num))\n joint_idx = [j for j in all_joint_idx if self._is_not_fixed(j)]\n self.right_arm_joint_indices = joint_idx\n self.controllable_joint_indices = self.right_arm_joint_indices\n print(\"joint_idx: \", joint_idx)\n\n super(Sawyer, self).init(self.body, id, np_random)"
},
{
"identifier": "parse_config",
"path": "manipulation/utils.py",
"snippet": "def parse_config(config, use_bard=True, obj_id=None, use_gpt_size=True, use_vhacd=True):\n urdf_paths = []\n urdf_sizes = []\n urdf_locations = []\n urdf_names = []\n urdf_types = []\n urdf_on_tables = []\n urdf_movables = []\n use_table = False\n articulated_joint_angles = {}\n spatial_relationships = []\n distractor_config_path = None\n\n for obj in config:\n print(obj)\n\n if \"use_table\" in obj.keys():\n use_table = obj['use_table']\n\n if \"set_joint_angle_object_name\" in obj.keys():\n new_obj = copy.deepcopy(obj)\n new_obj.pop('set_joint_angle_object_name')\n articulated_joint_angles[obj['set_joint_angle_object_name']] = new_obj\n\n if \"spatial_relationships\" in obj.keys():\n spatial_relationships = obj['spatial_relationships']\n\n if 'task_name' in obj.keys() or 'task_description' in obj.keys():\n continue\n\n if \"distractor_config_path\" in obj.keys():\n distractor_config_path = obj['distractor_config_path']\n\n if \"type\" not in obj.keys():\n continue\n \n if obj['type'] == 'mesh':\n if 'uid' not in obj.keys():\n continue\n if obj_id is None:\n uid = obj['uid'][np.random.randint(len(obj['uid']))]\n else:\n uid = obj['uid'][obj_id]\n \n urdf_file_path = osp.join(\"objaverse_utils/data/obj\", \"{}\".format(uid), \"material.urdf\")\n if not os.path.exists(urdf_file_path):\n down_load_single_object(name=obj['lang'], uids=[uid])\n \n new_urdf_file_path = urdf_file_path.replace(\"material.urdf\", \"material_non_vhacd.urdf\")\n new_urdf_lines = []\n with open(urdf_file_path, 'r') as f:\n urdf_lines = f.readlines()\n for line in urdf_lines:\n if 'vhacd' in line:\n new_line = line.replace(\"_vhacd\", \"\")\n new_urdf_lines.append(new_line)\n else:\n new_urdf_lines.append(line)\n with open(new_urdf_file_path, 'w') as f:\n f.writelines(new_urdf_lines)\n urdf_file_path = new_urdf_file_path\n print(\"object {} choosing uid {} urdf_path {}\".format(obj['lang'], uid, urdf_file_path))\n\n urdf_paths.append(urdf_file_path)\n urdf_types.append('mesh')\n urdf_movables.append(True) # all mesh objects are movable\n \n elif obj['type'] == 'urdf':\n try:\n category = obj['lang']\n possible_obj_path = partnet_mobility_dict[category]\n except:\n category = obj['name']\n if category == 'Computer display':\n category = 'Display'\n possible_obj_path = partnet_mobility_dict[category]\n \n if 'reward_asset_path' not in obj.keys():\n obj_path = np.random.choice(possible_obj_path)\n if category == 'Toaster':\n obj_path = str(103486)\n if category == 'Microwave':\n obj_path = str(7310)\n if category == \"Oven\":\n obj_path = str(101808)\n if category == 'Refrigerator':\n obj_path = str(10638)\n else:\n obj_path = obj['reward_asset_path']\n urdf_file_path = osp.join(\"data/dataset\", obj_path, \"mobility.urdf\")\n if use_vhacd:\n new_urdf_file_path = urdf_file_path.replace(\"mobility.urdf\", \"mobility_vhacd.urdf\")\n if not osp.exists(new_urdf_file_path):\n new_urdf_file_path = preprocess_urdf(urdf_file_path)\n urdf_paths.append(new_urdf_file_path)\n else:\n urdf_paths.append(urdf_file_path)\n\n urdf_types.append('urdf')\n urdf_movables.append(obj.get('movable', False)) # by default, urdf objects are not movable, unless specified\n\n urdf_sizes.append(obj['size'])\n urdf_locations.append(parse_center(obj['center']))\n urdf_names.append(obj['name'])\n urdf_on_tables.append(obj.get('on_table', False))\n\n return urdf_paths, urdf_sizes, urdf_locations, urdf_names, urdf_types, urdf_on_tables, use_table, \\\n articulated_joint_angles, spatial_relationships, distractor_config_path, urdf_movables"
},
{
"identifier": "load_env",
"path": "manipulation/utils.py",
"snippet": "def load_env(env, load_path=None, state=None):\n\n if load_path is not None:\n with open(load_path, 'rb') as f:\n state = pickle.load(f)\n \n ### set env to stored object position and orientation\n for obj_name, obj_id in env.urdf_ids.items():\n p.resetBasePositionAndOrientation(obj_id, state['object_base_position'][obj_name], state['object_base_orientation'][obj_name], physicsClientId=env.id)\n\n ### set env to stored object joint angles\n for obj_name, obj_id in env.urdf_ids.items():\n num_links = p.getNumJoints(obj_id, physicsClientId=env.id)\n for link_idx in range(0, num_links):\n joint_angle = state['object_joint_angle_dicts'][obj_name][link_idx]\n p.resetJointState(obj_id, link_idx, joint_angle, physicsClientId=env.id)\n\n ### recover suction\n env.activated = state['activated']\n if state['activated']:\n env.suction_obj_id = state['suction_object_id']\n env.suction_contact_link = state['suction_contact_link']\n env.suction_to_obj_pose = state['suction_to_obj_pose']\n env.create_suction_constraint(env.suction_obj_id, env.suction_contact_link, env.suction_to_obj_pose)\n\n if \"urdf_paths\" in state:\n env.urdf_paths = state[\"urdf_paths\"]\n\n if \"object_sizes\" in state:\n env.simulator_sizes = state[\"object_sizes\"]\n\n if \"robot_name\" in state:\n env.robot_name = state[\"robot_name\"]\n\n if \"table_path\" in state and env.use_table:\n env.table_path = state[\"table_path\"]\n\n return state"
},
{
"identifier": "download_and_parse_objavarse_obj_from_yaml_config",
"path": "manipulation/utils.py",
"snippet": "def download_and_parse_objavarse_obj_from_yaml_config(config_path, candidate_num=10, vhacd=True):\n\n config = None\n while config is None:\n with open(config_path, 'r') as file:\n config = yaml.safe_load(file)\n\n task_name = None\n task_description = None\n for obj in config:\n if 'task_name' in obj.keys():\n task_name = obj['task_name']\n task_description = obj['task_description']\n break\n\n for obj in config:\n if 'type' in obj.keys() and obj['type'] == 'mesh' and 'uid' not in obj.keys():\n print(\"{} trying to download object: {} {}\".format(\"=\" * 20, obj['lang'], \"=\" * 20))\n success = down_load_single_object(obj[\"lang\"], candidate_num=candidate_num, vhacd=vhacd, \n task_name=task_name, task_description=task_description)\n if not success:\n print(\"failed to find suitable object to download {} quit building this task\".format(obj[\"lang\"]))\n return False\n obj['uid'] = text_to_uid_dict[obj[\"lang\"]]\n obj['all_uid'] = text_to_uid_dict[obj[\"lang\"] + \"_all\"]\n\n with open(config_path, 'w') as f:\n yaml.dump(config, f, indent=4)\n\n return True"
},
{
"identifier": "get_joint_id_from_name",
"path": "manipulation/gpt_reward_api.py",
"snippet": "def get_joint_id_from_name(simulator, object_name, joint_name):\n object_id = simulator.urdf_ids[object_name]\n num_joints = p.getNumJoints(object_id, physicsClientId=simulator.id)\n joint_index = None\n for i in range(num_joints):\n joint_info = p.getJointInfo(object_id, i, physicsClientId=simulator.id)\n if joint_info[1].decode(\"utf-8\") == joint_name:\n joint_index = i\n break\n\n return joint_index"
},
{
"identifier": "get_link_id_from_name",
"path": "manipulation/gpt_reward_api.py",
"snippet": "def get_link_id_from_name(simulator, object_name, link_name):\n object_id = simulator.urdf_ids[object_name]\n num_joints = p.getNumJoints(object_id, physicsClientId=simulator.id)\n joint_index = None\n for i in range(num_joints):\n joint_info = p.getJointInfo(object_id, i, physicsClientId=simulator.id)\n if joint_info[12].decode(\"utf-8\") == link_name:\n joint_index = i\n break\n\n return joint_index"
}
] | import numpy as np
import pybullet as p
import gym
import pickle
import yaml
import os.path as osp
from gym.utils import seeding
from gym import spaces
from collections import defaultdict
from scipy.spatial.transform import Rotation as R
from manipulation.panda import Panda
from manipulation.ur5 import UR5
from manipulation.sawyer import Sawyer
from manipulation.utils import parse_config, load_env, download_and_parse_objavarse_obj_from_yaml_config
from manipulation.gpt_reward_api import get_joint_id_from_name, get_link_id_from_name
from manipulation.table_utils import table_paths, table_scales, table_poses, table_bbox_scale_down_factors | 7,620 | joint_name = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[1].decode("utf-8")
joint_angle = p.getJointState(obj_id, joint_idx, physicsClientId=self.id)[0]
self.initial_joint_angle[name][joint_name] = joint_angle
self.initial_pos = {}
self.initial_orient = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
pos, orient = p.getBasePositionAndOrientation(obj_id, physicsClientId=self.id)
self.initial_pos[name] = pos
self.initial_orient[name] = orient
def set_to_default_joint_angles(self):
for obj_name in self.urdf_ids:
if obj_name == 'robot' or obj_name == 'plane' or obj_name == "init_table": continue
obj_id = self.urdf_ids[obj_name]
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_limit_low, joint_limit_high = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[8:10]
if joint_limit_low > joint_limit_high:
joint_limit_low, joint_limit_high = joint_limit_high, joint_limit_low
joint_val = joint_limit_low + 0.06 * (joint_limit_high - joint_limit_low)
p.resetJointState(obj_id, joint_idx, joint_val, physicsClientId=self.id)
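    # (Added note.) Each joint is initialized slightly above its lower limit:
    # joint_val = low + 0.06 * (high - low). For example, a hinge with limits [0, 1.5] rad
    # starts at 0.09 rad, i.e. just barely opened rather than exactly at the limit.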
def handle_gpt_special_relationships(self, spatial_relationships):
# we support "on" and "in" for now, but this can be extended to more relationships
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if words[0] == "on":
obj_a = words[1]
obj_b = words[2]
if len(words) == 4:
obj_b_link = words[3]
obj_b_link_id = get_link_id_from_name(self, obj_b, obj_b_link)
else:
obj_b_link_id = -1
obj_a_id, obj_b_id = self.urdf_ids[obj_a], self.urdf_ids[obj_b]
obj_a_bbox_min, obj_a_bbox_max = self.get_aabb(obj_a_id)
obj_a_size = obj_a_bbox_max - obj_a_bbox_min
target_aabb_min, target_aabb_max = self.get_aabb_link(obj_b_id, obj_b_link_id)
id_line = p.addUserDebugLine(target_aabb_min, target_aabb_max, [1, 0, 0], lineWidth=10, lifeTime=0, physicsClientId=self.id)
id_point = p.addUserDebugPoints([(target_aabb_min + target_aabb_max) / 2], [[0, 0, 1]], 10, 0, physicsClientId=self.id)
new_pos = (target_aabb_min + target_aabb_max) / 2
new_pos[2] = target_aabb_max[2] # put obj a on top of obj b.
new_pos[2] += obj_a_size[2] # add the height of obj a
if not self.randomize:
obj_a_orientation = p.getQuaternionFromEuler([np.pi/2, 0, 0], physicsClientId=self.id)
else:
random_orientations = [0, np.pi / 2, np.pi, np.pi * 3 / 2]
obj_a_orientation = p.getQuaternionFromEuler([np.pi/2, 0, random_orientations[np.random.randint(4)]], physicsClientId=self.id)
p.resetBasePositionAndOrientation(obj_a_id, new_pos, obj_a_orientation, physicsClientId=self.id)
p.removeUserDebugItem(id_line, physicsClientId=self.id)
p.removeUserDebugItem(id_point, physicsClientId=self.id)
if words[0] == 'in':
obj_a = words[1]
obj_b = words[2]
if len(words) == 4:
obj_b_link = words[3]
obj_b_link_id = get_link_id_from_name(self, obj_b, obj_b_link)
else:
obj_b_link_id = -1
obj_a_id, obj_b_id = self.urdf_ids[obj_a], self.urdf_ids[obj_b]
                # if there is still a collision after many attempts, scale down the size of object A.
cnt = 1
collision_free = False
obj_a_new_size = self.simulator_sizes[obj_a]
obj_a_ori_pos, obj_a_orientation = p.getBasePositionAndOrientation(obj_a_id, physicsClientId=self.id)
target_aabb_min, target_aabb_max = self.get_aabb_link(obj_b_id, obj_b_link_id)
while not collision_free:
if cnt % 100 == 0:
print("scaling down! object size is {}".format(obj_a_new_size))
obj_a_new_size = obj_a_new_size * 0.9
p.removeBody(obj_a_id, physicsClientId=self.id)
obj_a_id = p.loadURDF(self.urdf_paths[obj_a],
basePosition=obj_a_ori_pos,
baseOrientation=obj_a_orientation,
physicsClientId=self.id, useFixedBase=False, globalScaling=obj_a_new_size)
self.urdf_ids[obj_a] = obj_a_id
self.simulator_sizes[obj_a] = obj_a_new_size
obj_a_bbox_min, obj_a_bbox_max = self.get_aabb(obj_a_id)
obj_a_size = obj_a_bbox_max - obj_a_bbox_min
id_line = p.addUserDebugLine(target_aabb_min, target_aabb_max, [1, 0, 0], lineWidth=10, lifeTime=0, physicsClientId=self.id)
id_point = p.addUserDebugPoints([(target_aabb_min + target_aabb_max) / 2], [[0, 0, 1]], 10, 0, physicsClientId=self.id)
center_pos = (target_aabb_min + target_aabb_max) / 2
up_pos = center_pos.copy()
up_pos[2] += obj_a_size[2]
possible_locations = [center_pos, up_pos]
obj_a_orientation = p.getQuaternionFromEuler([np.pi/2, 0, 0], physicsClientId=self.id)
for pos in possible_locations: # we try two possible locations to put obj a in obj b
p.resetBasePositionAndOrientation(obj_a_id, pos, obj_a_orientation, physicsClientId=self.id)
contact_points = p.getClosestPoints(obj_a_id, obj_b_id, 0.002, physicsClientId=self.id)
if len(contact_points) == 0:
collision_free = True
break
p.removeUserDebugItem(id_line, physicsClientId=self.id)
p.removeUserDebugItem(id_point, physicsClientId=self.id)
cnt += 1
if cnt > 1000: # if after scaling for 10 times it still does not work, let it be.
break
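    # (Added note.) Spatial relationships arrive as plain comma-separated strings, e.g.
    # "on, mug, table" or "in, apple, fridge, shelf_link" (object/link names here are only
    # illustrative); the optional fourth token selects a specific link of the second object.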
def handle_gpt_joint_angle(self, articulated_init_joint_angles):
for name in articulated_init_joint_angles:
obj_id = self.urdf_ids[name.lower()]
for joint_name, joint_angle in articulated_init_joint_angles[name].items():
|
class SimpleEnv(gym.Env):
def __init__(self,
dt=0.01,
config_path=None,
gui=False,
frameskip=2,
horizon=120,
restore_state_file=None,
rotation_mode='delta-axis-angle-local',
translation_mode='delta-translation',
max_rotation=np.deg2rad(5),
max_translation=0.15,
use_suction=True, # whether to use a suction gripper
object_candidate_num=6, # how many candidate objects to sample from objaverse
vhacd=False, # if to perform vhacd on the object for better collision detection for pybullet
randomize=0, # if to randomize the scene
obj_id=0, # which object to choose to use from the candidates
):
super().__init__()
# Task
self.config_path = config_path
self.restore_state_file = restore_state_file
self.frameskip = frameskip
self.horizon = horizon
self.gui = gui
self.object_candidate_num = object_candidate_num
self.solution_path = None
        self.success = False # not really used, kept for now
self.primitive_save_path = None # to be used for saving the primitives execution results
self.randomize = randomize
self.obj_id = obj_id # which object to choose to use from the candidates
# physics
self.gravity = -9.81
self.contact_constraint = None
self.vhacd = vhacd
# action space
self.use_suction = use_suction
self.rotation_mode = rotation_mode
self.translation_mode = translation_mode
self.max_rotation_angle = max_rotation
self.max_translation = max_translation
self.suction_to_obj_pose = 0
self.suction_contact_link = None
self.suction_obj_id = None
self.activated = 0
if self.gui:
try:
self.id = p.connect(p.GUI)
except:
self.id = p.connect(p.DIRECT)
else:
self.id = p.connect(p.DIRECT)
self.asset_dir = osp.join(osp.dirname(osp.realpath(__file__)), "assets/")
hz=int(1/dt)
p.setTimeStep(1.0 / hz, physicsClientId=self.id)
self.seed()
self.set_scene()
self.setup_camera_rpy()
self.scene_lower, self.scene_upper = self.get_scene_bounds()
self.scene_center = (self.scene_lower + self.scene_upper) / 2
self.scene_range = (self.scene_upper - self.scene_lower) / 2
self.grasp_action_mag = 0.06 if not self.use_suction else 1
self.action_low = np.array([-1, -1, -1, -1, -1, -1, -1])
self.action_high = np.array([1, 1, 1, 1, 1, 1, self.grasp_action_mag])
self.action_space = spaces.Box(low=self.action_low, high=self.action_high, dtype=np.float32)
self.base_action_space = spaces.Box(low=self.action_low, high=self.action_high, dtype=np.float32)
self.num_objects = len(self.urdf_ids) - 2 # exclude plane, robot
distractor_object_num = np.sum(list(self.is_distractor.values()))
self.num_objects -= distractor_object_num
### For RL policy learning, observation space includes:
# 1. object positions and orientations (6 * num_objects)
# 2. object min and max bounding box (6 * num_objects)
# 3. articulated object joint angles (num_objects * num_joints)
# 4. articulated object link position and orientation (num_objects * num_joints * 6)
# 5. robot base position (xy)
# 6. robot end-effector position and orientation (6)
# 7. gripper suction activated/deactivate or gripper joint angle (if not using suction gripper) (1)
num_obs = self.num_objects * 12 # obs 1 and 2
for name in self.urdf_types:
if self.urdf_types[name] == 'urdf' and not self.is_distractor[name]: # obs 3 and 4
num_joints = p.getNumJoints(self.urdf_ids[name], physicsClientId=self.id)
num_obs += num_joints
num_obs += 6 * num_joints
num_obs += 2 + 6 + 1 # obs 5 6 7
self.base_num_obs = num_obs
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(num_obs, ), dtype=np.float32)
self.base_observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.base_num_obs, ), dtype=np.float32)
self.detected_position = {} # not used for now, keep it
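    # (Added worked example.) For a scene with 2 task-relevant objects where one articulated
    # object has 3 joints, the observation size above works out to
    # 2 * 12 + (3 + 6 * 3) + (2 + 6 + 1) = 24 + 21 + 9 = 54.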
def normalize_position(self, pos):
if self.translation_mode == 'normalized-direct-translation':
return (pos - self.scene_center) / self.scene_range
else:
return pos
def seed(self, seed=None):
        self.np_random, _ = seeding.np_random(seed)
def get_aabb(self, id):
num_joints = p.getNumJoints(id, physicsClientId=self.id)
min_aabbs, max_aabbs = [], []
for link_idx in range(-1, num_joints):
min_aabb, max_aabb = p.getAABB(id, link_idx, physicsClientId=self.id)
min_aabbs.append(list(min_aabb))
max_aabbs.append(list(max_aabb))
min_aabb = np.min(np.concatenate(min_aabbs, axis=0).reshape(-1, 3), axis=0)
max_aabb = np.max(np.concatenate(max_aabbs, axis=0).reshape(-1, 3), axis=0)
return min_aabb, max_aabb
def get_aabb_link(self, id, link_id):
min_aabb, max_aabb = p.getAABB(id, link_id, physicsClientId=self.id)
return np.array(min_aabb), np.array(max_aabb)
def get_scene_bounds(self):
min_aabbs = []
max_aabbs = []
for name, id in self.urdf_ids.items():
if name == 'plane': continue
min_aabb, max_aabb = self.get_aabb(id)
min_aabbs.append(min_aabb)
max_aabbs.append(max_aabb)
min_aabb = np.min(np.stack(min_aabbs, axis=0).reshape(-1, 3), axis=0)
max_aabb = np.max(np.stack(max_aabbs, axis=0).reshape(-1, 3), axis=0)
range = max_aabb - min_aabb
return min_aabb - 0.5 * range, max_aabb + 0.5 * range
def clip_within_workspace(self, robot_pos, ori_pos, on_table):
pos = ori_pos.copy()
if not on_table:
# If objects are too close to the robot, push them away
x_near_low, x_near_high = robot_pos[0] - 0.3, robot_pos[0] + 0.3
y_near_low, y_near_high = robot_pos[1] - 0.3, robot_pos[1] + 0.3
if pos[0] > x_near_low and pos[0] < x_near_high:
pos[0] = x_near_low if pos[0] < robot_pos[0] else x_near_high
if pos[1] > y_near_low and pos[1] < y_near_high:
pos[1] = y_near_low if pos[1] < robot_pos[1] else y_near_high
return pos
else:
# Object is on table, should be within table's bounding box
new_pos = pos.copy()
new_pos[:2] = np.clip(new_pos[:2], self.table_bbox_min[:2], self.table_bbox_max[:2])
return new_pos
def get_robot_base_pos(self):
robot_base_pos = [1, 1, 0.28]
return robot_base_pos
def get_robot_init_joint_angles(self):
init_joint_angles = [0 for _ in range(len(self.robot.right_arm_joint_indices))]
if self.robot_name == 'panda':
init_joint_angles = [0, -1.10916842e-04, 7.33823451e-05, -5.47701370e-01, -5.94950533e-01,
2.62857916e+00, -4.85316284e-01, 1.96042022e+00, 2.15271531e+00,
-7.35304443e-01]
return init_joint_angles
def set_scene(
self,
):
### simulation preparation
p.resetSimulation(physicsClientId=self.id)
if self.gui:
p.resetDebugVisualizerCamera(cameraDistance=1.75, cameraYaw=-25, cameraPitch=-45, cameraTargetPosition=[-0.2, 0, 0.4], physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0, physicsClientId=self.id)
p.setRealTimeSimulation(0, physicsClientId=self.id)
p.setGravity(0, 0, self.gravity, physicsClientId=self.id)
### load restore state
restore_state = None
if self.restore_state_file is not None:
with open(self.restore_state_file, 'rb') as f:
restore_state = pickle.load(f)
### load plane
planeId = p.loadURDF(osp.join(self.asset_dir, "plane", "plane.urdf"), physicsClientId=self.id)
### create and load a robot
robot_base_pos = self.load_robot(restore_state)
### load and parse task config (including semantically meaningful distractor objects)
self.urdf_ids = {
"robot": self.robot.body,
"plane": planeId,
}
self.urdf_paths = {}
self.urdf_types = {}
self.init_positions = {}
self.on_tables = {}
self.simulator_sizes = {}
self.is_distractor = {
"robot": 0,
"plane": 0,
}
urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables, \
use_table, articulated_init_joint_angles, spatial_relationships = self.load_and_parse_config(restore_state)
### handle the case if there is a table
self.load_table(use_table, restore_state)
### load each object from the task config
self.load_object(urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables)
### adjusting object positions
### place the lowest point on the object to be the height where GPT specifies
object_height = self.adjust_object_positions(robot_base_pos)
### resolve collisions between objects
self.resolve_collision(robot_base_pos, object_height, spatial_relationships)
### handle any special relationships outputted by GPT
self.handle_gpt_special_relationships(spatial_relationships)
### set all object's joint angles to the lower joint limit
self.set_to_default_joint_angles()
### overwrite joint angles specified by GPT
self.handle_gpt_joint_angle(articulated_init_joint_angles)
### record initial joint angles and positions
self.record_initial_joint_and_pose()
### stabilize the scene
for _ in range(500):
p.stepSimulation(physicsClientId=self.id)
### restore to a state if provided
if self.restore_state_file is not None:
load_env(self, self.restore_state_file)
### Enable debug rendering
if self.gui:
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
self.init_state = p.saveState(physicsClientId=self.id)
def load_robot(self, restore_state):
robot_classes = {
"panda": Panda,
"sawyer": Sawyer,
"ur5": UR5,
}
robot_names = list(robot_classes.keys())
self.robot_name = robot_names[np.random.randint(len(robot_names))]
if restore_state is not None and "robot_name" in restore_state:
self.robot_name = restore_state['robot_name']
self.robot_class = robot_classes[self.robot_name]
# Create robot
self.robot = self.robot_class()
self.robot.init(self.asset_dir, self.id, self.np_random, fixed_base=True, use_suction=self.use_suction)
self.agents = [self.robot]
self.suction_id = self.robot.right_gripper_indices[0]
# Update robot motor gains
self.robot.motor_gains = 0.05
self.robot.motor_forces = 100.0
# Set robot base position & orientation, and joint angles
robot_base_pos = self.get_robot_base_pos()
robot_base_orient = [0, 0, 0, 1]
self.robot_base_orient = robot_base_orient
self.robot.set_base_pos_orient(robot_base_pos, robot_base_orient)
init_joint_angles = self.get_robot_init_joint_angles()
self.robot.set_joint_angles(self.robot.right_arm_joint_indices, init_joint_angles)
return robot_base_pos
def load_and_parse_config(self, restore_state):
### select and download objects from objaverse
res = download_and_parse_objavarse_obj_from_yaml_config(self.config_path, candidate_num=self.object_candidate_num, vhacd=self.vhacd)
if not res:
print("=" * 20)
print("some objects cannot be found in objaverse, task_build failed, now exit ...")
print("=" * 20)
exit()
self.config = None
while self.config is None:
with open(self.config_path, 'r') as file:
self.config = yaml.safe_load(file)
for obj in self.config:
if "solution_path" in obj:
self.solution_path = obj["solution_path"]
break
### parse config
urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, use_table, \
articulated_init_joint_angles, spatial_relationships, distractor_config_path, urdf_movables = parse_config(self.config,
use_bard=True, obj_id=self.obj_id)
if not use_table:
urdf_on_table = [False for _ in urdf_on_table]
urdf_names = [x.lower() for x in urdf_names]
for name in urdf_names:
self.is_distractor[name] = 0
### parse distractor object config (semantically meaningful objects that are related but not used for the task)
if distractor_config_path is not None:
self.distractor_config_path = distractor_config_path
res = download_and_parse_objavarse_obj_from_yaml_config(distractor_config_path, candidate_num=self.object_candidate_num, vhacd=self.vhacd)
with open(distractor_config_path, 'r') as f:
self.distractor_config = yaml.safe_load(f)
distractor_urdf_paths, distractor_urdf_sizes, distractor_urdf_positions, distractor_urdf_names, distractor_urdf_types, \
distractor_urdf_on_table, _, _, _, _, _ = \
parse_config(self.distractor_config, use_bard=True, obj_id=self.obj_id, use_vhacd=False)
distractor_urdf_names = [x.lower() for x in distractor_urdf_names]
if not use_table:
distractor_urdf_on_table = [False for _ in distractor_urdf_on_table]
for name in distractor_urdf_names:
self.is_distractor[name] = 1
distractor_movables = [True for _ in distractor_urdf_names]
urdf_paths += distractor_urdf_paths
urdf_sizes += distractor_urdf_sizes
urdf_positions += distractor_urdf_positions
urdf_names += distractor_urdf_names
urdf_types += distractor_urdf_types
urdf_on_table += distractor_urdf_on_table
urdf_movables += distractor_movables
if restore_state is not None:
if "urdf_paths" in restore_state:
self.urdf_paths = restore_state['urdf_paths']
urdf_paths = [self.urdf_paths[name] for name in urdf_names]
if "object_sizes" in restore_state:
self.simulator_sizes = restore_state['object_sizes']
urdf_sizes = [self.simulator_sizes[name] for name in urdf_names]
return urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables, \
use_table, articulated_init_joint_angles, spatial_relationships
def load_table(self, use_table, restore_state):
self.use_table = use_table
if use_table:
self.table_path = table_paths[np.random.randint(len(table_paths))]
if restore_state is not None:
self.table_path = restore_state['table_path']
table_scale = table_scales[self.table_path]
table_pos = table_poses[self.table_path]
table_orientation = [np.pi/2, 0, 0]
self.table = p.loadURDF(osp.join(self.asset_dir, self.table_path, "material.urdf"), physicsClientId=self.id, useFixedBase=True,
globalScaling=table_scale)
if not self.randomize:
random_orientation = p.getQuaternionFromEuler(table_orientation, physicsClientId=self.id)
else:
random_orientations = [0, np.pi / 2, np.pi, np.pi * 3 / 2]
random_orientation = p.getQuaternionFromEuler([np.pi/2, 0, random_orientations[np.random.randint(4)]], physicsClientId=self.id)
p.resetBasePositionAndOrientation(self.table, table_pos, random_orientation, physicsClientId=self.id)
self.table_bbox_min, self.table_bbox_max = self.get_aabb(self.table)
table_range = self.table_bbox_max - self.table_bbox_min
self.table_bbox_min[:2] += table_range[:2] * table_bbox_scale_down_factors[self.table_path]
self.table_bbox_max[:2] -= table_range[:2] * table_bbox_scale_down_factors[self.table_path]
self.table_height = self.table_bbox_max[2]
p.addUserDebugLine([*self.table_bbox_min[:2], self.table_height], self.table_bbox_max, [1, 0, 0], lineWidth=10, lifeTime=0, physicsClientId=self.id)
self.simulator_sizes["init_table"] = table_scale
self.urdf_ids["init_table"] = self.table
self.is_distractor['init_table'] = 0
def load_object(self, urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables):
for path, size, pos, name, type, on_table, moveable in zip(urdf_paths, urdf_sizes, urdf_positions, urdf_names, urdf_types, urdf_on_table, urdf_movables):
name = name.lower()
            # by default, all objects are movable, except for urdf objects
use_fixed_base = (type == 'urdf' and not self.is_distractor[name])
if type == 'urdf' and moveable: # if gpt specified the object is movable, then it is movable
use_fixed_base = False
size = min(size, 1.2)
size = max(size, 0.1) # if the object is too small, current gripper cannot really manipulate it.
x_orient = np.pi/2 if type == 'mesh' else 0 # handle different coordinate axis by objaverse and partnet-mobility
if self.randomize or self.is_distractor[name]:
orientation = p.getQuaternionFromEuler([x_orient, 0, self.np_random.uniform(-np.pi/3, np.pi/3)], physicsClientId=self.id)
else:
orientation = p.getQuaternionFromEuler([x_orient, 0, 0], physicsClientId=self.id)
if not on_table:
load_pos = pos
else: # change to be table coordinate
table_xy_range = self.table_bbox_max[:2] - self.table_bbox_min[:2]
obj_x = self.table_bbox_min[0] + pos[0] * table_xy_range[0]
obj_y = self.table_bbox_min[1] + pos[1] * table_xy_range[1]
obj_z = self.table_height
load_pos = [obj_x, obj_y, obj_z]
id = p.loadURDF(path, basePosition=load_pos, baseOrientation=orientation, physicsClientId=self.id, useFixedBase=use_fixed_base, globalScaling=size)
# scale size
if name in self.simulator_sizes:
p.removeBody(id, physicsClientId=self.id)
saved_size = self.simulator_sizes[name]
id = p.loadURDF(path, basePosition=load_pos, baseOrientation=orientation, physicsClientId=self.id, useFixedBase=use_fixed_base, globalScaling=saved_size)
else:
min_aabb, max_aabb = self.get_aabb(id)
actual_size = np.linalg.norm(max_aabb - min_aabb)
if np.abs(actual_size - size) > 0.05:
p.removeBody(id, physicsClientId=self.id)
id = p.loadURDF(path, basePosition=load_pos, baseOrientation=orientation, physicsClientId=self.id, useFixedBase=use_fixed_base, globalScaling=size ** 2 / actual_size)
self.simulator_sizes[name] = size ** 2 / actual_size
else:
self.simulator_sizes[name] = size
self.urdf_ids[name] = id
self.urdf_paths[name] = path
self.urdf_types[name] = type
self.init_positions[name] = np.array(load_pos)
self.on_tables[name] = on_table
print("Finished loading object: ", name)
def adjust_object_positions(self, robot_base_pos):
object_height = {}
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table': continue
min_aabb, max_aabb = self.get_aabb(id)
min_z = min_aabb[2]
object_height[id] = 2 * self.init_positions[name][2] - min_z
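            # (Added note.) Equivalent to init_z + (init_z - min_z): the base is raised by the gap
            # between the requested height and the mesh's current lowest point, so the object's
            # bottom ends up at the height given in the config.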
pos, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos)
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
self.init_positions[name] = new_pos
return object_height
def resolve_collision(self, robot_base_pos, object_height, spatial_relationships):
collision = True
collision_cnt = 1
while collision:
if collision_cnt % 50 == 0: # if collision is not resolved every 50 iterations, we randomly reset object's position
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == "init_table": continue
pos = self.init_positions[name]
_, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos) + np.random.uniform(-0.2, 0.2, size=3)
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
p.stepSimulation(physicsClientId=self.id)
push_directions = defaultdict(list) # store the push direction for each object
# detect collisions between objects
detected_collision = False
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table': continue
for name2, id2 in self.urdf_ids.items():
if name == name2 or name2 == 'robot' or name2 == 'plane' or name2 == 'init_table': continue
# if gpt specifies obj a and obj b should have some special relationship, then skip collision resolution
skip = False
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if name in words and name2 in words:
skip = True
break
if skip: continue
contact_points = p.getClosestPoints(id, id2, 0.01, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
# both are distractors or both are not, push both objects away
if (self.is_distractor[name] and self.is_distractor[name2]) or \
(not self.is_distractor[name] and not self.is_distractor[name2]):
push_directions[id].append(-push_direction)
push_directions[id2].append(push_direction)
# only 1 is distractor, only pushes the distractor
if self.is_distractor[name] and not self.is_distractor[name2]:
push_directions[id].append(push_direction)
if not self.is_distractor[name] and self.is_distractor[name2]:
push_directions[id2].append(-push_direction)
detected_collision = True
# collisions between robot and objects, only push object away
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table':
continue
contact_points = p.getClosestPoints(self.robot.body, id, 0.05, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
push_directions[id].append(-push_direction)
detected_collision = True
# between table and objects that should not be placed on table
if self.use_table:
for name, id in self.urdf_ids.items():
if name == 'robot' or name == 'plane' or name == 'init_table':
continue
if self.on_tables[name]:
continue
contact_points = p.getClosestPoints(self.robot.body, id, 0.05, physicsClientId=self.id)
if len(contact_points) > 0:
contact_point = contact_points[0]
push_direction = contact_point[7]
push_direction = np.array([push_direction[0], push_direction[1], push_direction[2]])
push_directions[id].append(-push_direction)
detected_collision = True
# move objects
push_distance = 0.1
for id in push_directions:
for direction in push_directions[id]:
pos, orient = p.getBasePositionAndOrientation(id, physicsClientId=self.id)
new_pos = np.array(pos) + push_distance * direction
new_pos = self.clip_within_workspace(robot_base_pos, new_pos, self.on_tables[name])
new_pos[2] = object_height[id]
p.resetBasePositionAndOrientation(id, new_pos, orient, physicsClientId=self.id)
p.stepSimulation(physicsClientId=self.id)
collision = detected_collision
collision_cnt += 1
if collision_cnt > 1000:
break
def record_initial_joint_and_pose(self):
self.initial_joint_angle = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
if self.urdf_types[name.lower()] == 'urdf':
self.initial_joint_angle[name] = {}
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_name = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[1].decode("utf-8")
joint_angle = p.getJointState(obj_id, joint_idx, physicsClientId=self.id)[0]
self.initial_joint_angle[name][joint_name] = joint_angle
self.initial_pos = {}
self.initial_orient = {}
for name in self.urdf_ids:
obj_id = self.urdf_ids[name.lower()]
if name == 'robot' or name == 'plane' or name == "init_table": continue
pos, orient = p.getBasePositionAndOrientation(obj_id, physicsClientId=self.id)
self.initial_pos[name] = pos
self.initial_orient[name] = orient
def set_to_default_joint_angles(self):
for obj_name in self.urdf_ids:
if obj_name == 'robot' or obj_name == 'plane' or obj_name == "init_table": continue
obj_id = self.urdf_ids[obj_name]
num_joints = p.getNumJoints(obj_id, physicsClientId=self.id)
for joint_idx in range(num_joints):
joint_limit_low, joint_limit_high = p.getJointInfo(obj_id, joint_idx, physicsClientId=self.id)[8:10]
if joint_limit_low > joint_limit_high:
joint_limit_low, joint_limit_high = joint_limit_high, joint_limit_low
joint_val = joint_limit_low + 0.06 * (joint_limit_high - joint_limit_low)
p.resetJointState(obj_id, joint_idx, joint_val, physicsClientId=self.id)
def handle_gpt_special_relationships(self, spatial_relationships):
# we support "on" and "in" for now, but this can be extended to more relationships
for spatial_relationship in spatial_relationships:
words = spatial_relationship.lower().split(",")
words = [word.strip().lstrip() for word in words]
if words[0] == "on":
obj_a = words[1]
obj_b = words[2]
if len(words) == 4:
obj_b_link = words[3]
obj_b_link_id = get_link_id_from_name(self, obj_b, obj_b_link)
else:
obj_b_link_id = -1
obj_a_id, obj_b_id = self.urdf_ids[obj_a], self.urdf_ids[obj_b]
obj_a_bbox_min, obj_a_bbox_max = self.get_aabb(obj_a_id)
obj_a_size = obj_a_bbox_max - obj_a_bbox_min
target_aabb_min, target_aabb_max = self.get_aabb_link(obj_b_id, obj_b_link_id)
id_line = p.addUserDebugLine(target_aabb_min, target_aabb_max, [1, 0, 0], lineWidth=10, lifeTime=0, physicsClientId=self.id)
id_point = p.addUserDebugPoints([(target_aabb_min + target_aabb_max) / 2], [[0, 0, 1]], 10, 0, physicsClientId=self.id)
new_pos = (target_aabb_min + target_aabb_max) / 2
new_pos[2] = target_aabb_max[2] # put obj a on top of obj b.
new_pos[2] += obj_a_size[2] # add the height of obj a
if not self.randomize:
obj_a_orientation = p.getQuaternionFromEuler([np.pi/2, 0, 0], physicsClientId=self.id)
else:
random_orientations = [0, np.pi / 2, np.pi, np.pi * 3 / 2]
obj_a_orientation = p.getQuaternionFromEuler([np.pi/2, 0, random_orientations[np.random.randint(4)]], physicsClientId=self.id)
p.resetBasePositionAndOrientation(obj_a_id, new_pos, obj_a_orientation, physicsClientId=self.id)
p.removeUserDebugItem(id_line, physicsClientId=self.id)
p.removeUserDebugItem(id_point, physicsClientId=self.id)
if words[0] == 'in':
obj_a = words[1]
obj_b = words[2]
if len(words) == 4:
obj_b_link = words[3]
obj_b_link_id = get_link_id_from_name(self, obj_b, obj_b_link)
else:
obj_b_link_id = -1
obj_a_id, obj_b_id = self.urdf_ids[obj_a], self.urdf_ids[obj_b]
                # if there is still a collision after many attempts, scale down the size of object A.
cnt = 1
collision_free = False
obj_a_new_size = self.simulator_sizes[obj_a]
obj_a_ori_pos, obj_a_orientation = p.getBasePositionAndOrientation(obj_a_id, physicsClientId=self.id)
target_aabb_min, target_aabb_max = self.get_aabb_link(obj_b_id, obj_b_link_id)
while not collision_free:
if cnt % 100 == 0:
print("scaling down! object size is {}".format(obj_a_new_size))
obj_a_new_size = obj_a_new_size * 0.9
p.removeBody(obj_a_id, physicsClientId=self.id)
obj_a_id = p.loadURDF(self.urdf_paths[obj_a],
basePosition=obj_a_ori_pos,
baseOrientation=obj_a_orientation,
physicsClientId=self.id, useFixedBase=False, globalScaling=obj_a_new_size)
self.urdf_ids[obj_a] = obj_a_id
self.simulator_sizes[obj_a] = obj_a_new_size
obj_a_bbox_min, obj_a_bbox_max = self.get_aabb(obj_a_id)
obj_a_size = obj_a_bbox_max - obj_a_bbox_min
id_line = p.addUserDebugLine(target_aabb_min, target_aabb_max, [1, 0, 0], lineWidth=10, lifeTime=0, physicsClientId=self.id)
id_point = p.addUserDebugPoints([(target_aabb_min + target_aabb_max) / 2], [[0, 0, 1]], 10, 0, physicsClientId=self.id)
center_pos = (target_aabb_min + target_aabb_max) / 2
up_pos = center_pos.copy()
up_pos[2] += obj_a_size[2]
possible_locations = [center_pos, up_pos]
obj_a_orientation = p.getQuaternionFromEuler([np.pi/2, 0, 0], physicsClientId=self.id)
for pos in possible_locations: # we try two possible locations to put obj a in obj b
p.resetBasePositionAndOrientation(obj_a_id, pos, obj_a_orientation, physicsClientId=self.id)
contact_points = p.getClosestPoints(obj_a_id, obj_b_id, 0.002, physicsClientId=self.id)
if len(contact_points) == 0:
collision_free = True
break
p.removeUserDebugItem(id_line, physicsClientId=self.id)
p.removeUserDebugItem(id_point, physicsClientId=self.id)
cnt += 1
if cnt > 1000: # if after scaling for 10 times it still does not work, let it be.
break
def handle_gpt_joint_angle(self, articulated_init_joint_angles):
for name in articulated_init_joint_angles:
obj_id = self.urdf_ids[name.lower()]
for joint_name, joint_angle in articulated_init_joint_angles[name].items(): | joint_idx = get_joint_id_from_name(self, name.lower(), joint_name) | 6 | 2023-10-31 19:44:09+00:00 | 12k |
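# Added illustration (not part of the original source): a minimal sketch of how the
# get_joint_id_from_name helper from the context above is typically combined with
# p.resetJointState to set a named joint; `sim` is a hypothetical SimpleEnv instance.
def set_named_joint_angle(sim, object_name: str, joint_name: str, angle: float):
    obj_id = sim.urdf_ids[object_name.lower()]
    joint_idx = get_joint_id_from_name(sim, object_name.lower(), joint_name)
    if joint_idx is not None:
        p.resetJointState(obj_id, joint_idx, angle, physicsClientId=sim.id)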
KoeAI/LLVC | minimal_rvc/model.py | [
{
"identifier": "SynthesizerTrnMs256NSFSid",
"path": "minimal_rvc/models.py",
"snippet": "class SynthesizerTrnMs256NSFSid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n emb_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n self.emb_channels = emb_channels\n self.sr = sr\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder(\n inter_channels,\n hidden_channels,\n filter_channels,\n emb_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\n \"gin_channels:\",\n gin_channels,\n \"self.spk_embed_dim:\",\n self.spk_embed_dim,\n \"emb_channels:\",\n emb_channels,\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs256NSFSidNono",
"path": "minimal_rvc/models.py",
"snippet": "class SynthesizerTrnMs256NSFSidNono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n emb_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n self.emb_channels = emb_channels\n self.sr = sr\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder(\n inter_channels,\n hidden_channels,\n filter_channels,\n emb_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\n \"gin_channels:\",\n gin_channels,\n \"self.spk_embed_dim:\",\n self.spk_embed_dim,\n \"emb_channels:\",\n emb_channels,\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "VocalConvertPipeline",
"path": "minimal_rvc/pipeline.py",
"snippet": "class VocalConvertPipeline(object):\n def __init__(self, tgt_sr: int, device: Union[str, torch.device], is_half: bool, no_pad: bool = False):\n if isinstance(device, str):\n device = torch.device(device)\n if device.type == \"cuda\":\n vram = torch.cuda.get_device_properties(\n device).total_memory / 1024**3\n else:\n vram = None\n\n if vram is not None and vram <= 4:\n self.x_pad = 1\n self.x_query = 5\n self.x_center = 30\n self.x_max = 32\n elif vram is not None and vram <= 5:\n self.x_pad = 1\n self.x_query = 6\n self.x_center = 38\n self.x_max = 41\n else:\n self.x_pad = 3\n self.x_query = 10\n self.x_center = 60\n self.x_max = 65\n if no_pad:\n self.x_pad = 0\n\n self.sr = 16000 # hubert input sample rate\n self.window = 160 # hubert input window\n self.t_pad = self.sr * self.x_pad # padding time for each utterance\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # query time before and after query point\n self.t_center = self.sr * self.x_center # query cut point position\n self.t_max = self.sr * self.x_max # max time for no query\n self.device = device\n self.is_half = is_half\n\n self.model_rmvpe = RMVPE(\n f\"llvc_models/models/f0/rmvpe.pt\",\n is_half=self.is_half,\n device=self.device,\n )\n\n def get_optimal_torch_device(self, index: int = 0) -> torch.device:\n # Get cuda device\n if torch.cuda.is_available():\n # Very fast\n return torch.device(f\"cuda:{index % torch.cuda.device_count()}\")\n elif torch.backends.mps.is_available():\n return torch.device(\"mps\")\n # Insert an else here to grab \"xla\" devices if available. TO DO later. Requires the torch_xla.core.xla_model library\n # Else wise return the \"cpu\" as a torch device,\n return torch.device(\"cpu\")\n\n def get_f0_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n p_len,\n # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time.\n hop_length=64,\n model=\"full\", # Either use crepe-tiny \"tiny\" or crepe \"full\". Default is full\n ):\n # fixes the F.conv2D exception. 
We needed to convert double to float.\n x = x.astype(np.float32)\n x /= np.quantile(np.abs(x), 0.999)\n torch_device = self.get_optimal_torch_device()\n audio = torch.from_numpy(x).to(torch_device, copy=True)\n audio = torch.unsqueeze(audio, dim=0)\n if audio.ndim == 2 and audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True).detach()\n audio = audio.detach()\n print(\"Initiating prediction with a crepe_hop_length of: \" + str(hop_length))\n pitch: Tensor = torchcrepe.predict(\n audio,\n self.sr,\n hop_length,\n f0_min,\n f0_max,\n model,\n batch_size=hop_length * 2,\n device=torch_device,\n pad=True\n )\n p_len = p_len or x.shape[0] // hop_length\n # Resize the pitch for final f0\n source = np.array(pitch.squeeze(0).cpu().float().numpy())\n source[source < 0.001] = np.nan\n target = np.interp(\n np.arange(0, len(source) * p_len, len(source)) / p_len,\n np.arange(0, len(source)),\n source\n )\n f0 = np.nan_to_num(target)\n return f0 # Resized f0\n\n def get_f0_official_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n model=\"full\",\n ):\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n return f0\n\n def get_f0(\n self,\n x: np.ndarray,\n p_len: int,\n f0_up_key: int,\n f0_method: str,\n f0_relative: bool,\n inp_f0: np.ndarray = None,\n ):\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n\n if f0_method == \"harvest\":\n f0, t = pyworld.harvest(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"dio\":\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, 160, \"full\")\n elif f0_method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(\n x, f0_min, f0_max, \"full\")\n elif f0_method == \"rmvpe\":\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n if f0_relative:\n if f0_method == \"rmvpe\" or f0_method == \"rmvpe_onnx\":\n # this is the average f0 of /test_wavs/2086-149214-0000.wav\n # by calculating f0 relative to this wav, we can ensure\n # consistent output pitch when converting from different speakers\n rel_f0 = 126.21\n else:\n raise ValueError(\"TODO: find rel_f0 for \" + f0_method)\n mean_f0 = np.mean(f0[f0 > 0])\n offset = np.round(12 * np.log2(mean_f0 / rel_f0))\n # print(\"offset: \" + str(offset))\n f0_up_key = f0_up_key - offset\n f0 *= pow(2, f0_up_key / 12)\n tf0 = self.sr // self.window # f0 points per second\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0: self.x_pad *\n tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0: self.x_pad * tf0 + 
len(replace_f0)] = replace_f0[\n :shape\n ]\n\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(int)\n return f0_coarse, f0bak # 1-0\n\n def _convert(\n self,\n model: HubertModel,\n embedding_output_layer: int,\n net_g: SynthesizerTrnMs256NSFSid,\n sid: int,\n audio: np.ndarray,\n pitch: np.ndarray,\n pitchf: np.ndarray,\n index: faiss.IndexIVFFlat,\n big_npy: np.ndarray,\n index_rate: float,\n ):\n feats = torch.from_numpy(audio)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(\n feats.shape).to(self.device).fill_(False)\n\n half_support = (\n self.device.type == \"cuda\"\n and torch.cuda.get_device_capability(self.device)[0] >= 5.3\n )\n is_feats_dim_768 = net_g.emb_channels == 768\n\n if isinstance(model, tuple):\n feats = model[0](\n feats.squeeze(0).squeeze(0).to(self.device),\n return_tensors=\"pt\",\n sampling_rate=16000,\n )\n if self.is_half:\n feats = feats.input_values.to(self.device).half()\n else:\n feats = feats.input_values.to(self.device)\n with torch.no_grad():\n if is_feats_dim_768:\n feats = model[1](feats).last_hidden_state\n else:\n feats = model[1](feats).extract_features\n else:\n inputs = {\n \"source\": feats.half().to(self.device)\n if half_support\n else feats.to(self.device),\n \"padding_mask\": padding_mask.to(self.device),\n \"output_layer\": embedding_output_layer,\n }\n\n if not half_support:\n model = model.float()\n inputs[\"source\"] = inputs[\"source\"].float()\n\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n if is_feats_dim_768:\n feats = logits[0]\n else:\n feats = model.final_proj(logits[0])\n\n if (\n isinstance(index, type(None)) == False\n and isinstance(big_npy, type(None)) == False\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1),\n scale_factor=2).permute(0, 2, 1)\n\n p_len = audio.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch != None and pitchf != None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n if pitch != None and pitchf != None:\n audio1 = (\n (net_g.infer(feats, p_len, pitch,\n pitchf, sid)[0][0, 0] * 32768)\n .data.cpu()\n .float()\n .numpy()\n .astype(np.int16)\n )\n else:\n audio1 = (\n (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)\n .data.cpu()\n .float()\n .numpy()\n .astype(np.int16)\n )\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio1\n\n def __call__(\n self,\n model: HubertModel,\n embedding_output_layer: int,\n net_g: SynthesizerTrnMs256NSFSid,\n sid: int,\n audio: np.ndarray,\n transpose: int,\n f0_method: str,\n file_index: str,\n index_rate: float,\n if_f0: bool,\n 
f0_relative: bool,\n f0_file: str = None,\n ):\n\n index = big_npy = None\n\n bh, ah = signal.butter(N=5, Wn=48, btype=\"high\", fs=16000)\n audio = signal.filtfilt(bh, ah, audio)\n\n audio_pad = np.pad(\n audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += audio_pad[i: i - self.window]\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n np.abs(audio_sum[t - self.t_query: t + self.t_query])\n == np.abs(audio_sum[t - self.t_query: t + self.t_query]).min()\n )[0][0]\n )\n\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n audio_pad, p_len, transpose, f0_method, f0_relative, inp_f0)\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if self.device.type == \"mps\":\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(\n pitchf, device=self.device).unsqueeze(0).float()\n\n audio_opt = []\n\n s = 0\n t = None\n\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[s: t + self.t_pad2 + self.window],\n pitch[:, s //\n self.window: (t + self.t_pad2) // self.window],\n pitchf[:, s //\n self.window: (t + self.t_pad2) // self.window],\n index,\n big_npy,\n index_rate,\n )[self.t_pad_tgt: -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[s: t + self.t_pad2 + self.window],\n None,\n None,\n index,\n big_npy,\n index_rate,\n )[self.t_pad_tgt: -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window:] if t is not None else pitch,\n pitchf[:, t // self.window:] if t is not None else pitchf,\n index,\n big_npy,\n index_rate,\n )[self.t_pad_tgt: -self.t_pad_tgt]\n )\n else:\n result = self._convert(\n model,\n embedding_output_layer,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n index,\n big_npy,\n index_rate,\n )\n audio_opt.append(\n result[self.t_pad_tgt: result.shape[-1] - self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt"
},
{
"identifier": "opts",
"path": "minimal_rvc/cmd_opts.py",
"snippet": ""
},
{
"identifier": "ROOT_DIR",
"path": "minimal_rvc/shared.py",
"snippet": "ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nMODELS_DIR = os.path.join(ROOT_DIR, \"llvc_models\", \"models\")\ndef has_mps():"
},
{
"identifier": "load_audio",
"path": "minimal_rvc/utils.py",
"snippet": "def load_audio(file: str, sr):\n try:\n # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26\n # This launches a subprocess to decode audio while down-mixing and resampling as necessary.\n # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # Prevent small white copy path head and tail with spaces and \" and return\n out, _ = (\n ffmpeg.input(file, threads=0)\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sr)\n .run(cmd=[\"ffmpeg\", \"-nostdin\"], capture_stdout=True, capture_stderr=True)\n )\n except Exception as e:\n raise RuntimeError(f\"Failed to load audio: {e}\")\n\n return np.frombuffer(out, np.float32).flatten()"
}
] | import os
import re
import torch
from typing import *
from fairseq import checkpoint_utils
from fairseq.models.hubert.hubert import HubertModel
from pydub import AudioSegment
from .models import (SynthesizerTrnMs256NSFSid, SynthesizerTrnMs256NSFSidNono)
from .pipeline import VocalConvertPipeline
from .cmd_opts import opts
from .shared import ROOT_DIR, device, is_half
from .utils import load_audio | 7,837 | # This module is based on code from ddPn08, liujing04, and teftef6220
# https://github.com/ddPn08/rvc-webui
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
# https://github.com/teftef6220/Voice_Separation_and_Selection
# These modules are licensed under the MIT License.
AUDIO_OUT_DIR = opts.output_dir or os.path.join(ROOT_DIR, "outputs")
EMBEDDINGS_LIST = {
"hubert-base-japanese": (
"rinna_hubert_base_jp.pt",
"hubert-base-japanese",
"local",
),
"contentvec": ("checkpoint_best_legacy_500.pt", "contentvec", "local"),
}
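# Each value above is a (checkpoint filename, embedder name, source) triple; both bundled
# embedders are tagged "local" here. How the third field is consumed is not shown in this
# module, so that reading is an inference from the entries themselves.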
def update_state_dict(state_dict):
if "params" in state_dict and state_dict["params"] is not None:
return
keys = [
"spec_channels",
"segment_size",
"inter_channels",
"hidden_channels",
"filter_channels",
"n_heads",
"n_layers",
"kernel_size",
"p_dropout",
"resblock",
"resblock_kernel_sizes",
"resblock_dilation_sizes",
"upsample_rates",
"upsample_initial_channel",
"upsample_kernel_sizes",
"spk_embed_dim",
"gin_channels",
"emb_channels",
"sr",
]
state_dict["params"] = {}
n = 0
for i, key in enumerate(keys):
i = i - n
if len(state_dict["config"]) != 19 and key == "emb_channels":
# backward compat.
n += 1
continue
state_dict["params"][key] = state_dict["config"][i]
if not "emb_channels" in state_dict["params"]:
if state_dict.get("version", "v1") == "v1":
state_dict["params"]["emb_channels"] = 256 # for backward compat.
state_dict["embedder_output_layer"] = 9
else:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
state_dict["embedder_output_layer"] = 12
class VoiceConvertModel:
def __init__(self, model_name: str, state_dict: Dict[str, Any]) -> None:
update_state_dict(state_dict)
self.model_name = model_name
self.state_dict = state_dict
self.tgt_sr = state_dict["params"]["sr"]
f0 = state_dict.get("f0", 1)
state_dict["params"]["spk_embed_dim"] = state_dict["weight"][
"emb_g.weight"
].shape[0]
if not "emb_channels" in state_dict["params"]:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
if f0 == 1:
self.net_g = SynthesizerTrnMs256NSFSid(
**state_dict["params"], is_half=is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFSidNono(**state_dict["params"])
del self.net_g.enc_q
self.net_g.load_state_dict(state_dict["weight"], strict=False)
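        # enc_q (the posterior encoder) is only used by the training-time forward pass in the
        # synthesizer classes above, so it is deleted here and strict=False lets
        # load_state_dict ignore the checkpoint's now-unexpected enc_q.* weights.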
| # This module is based on code from ddPn08, liujing04, and teftef6220
# https://github.com/ddPn08/rvc-webui
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
# https://github.com/teftef6220/Voice_Separation_and_Selection
# These modules are licensed under the MIT License.
AUDIO_OUT_DIR = opts.output_dir or os.path.join(ROOT_DIR, "outputs")
EMBEDDINGS_LIST = {
"hubert-base-japanese": (
"rinna_hubert_base_jp.pt",
"hubert-base-japanese",
"local",
),
"contentvec": ("checkpoint_best_legacy_500.pt", "contentvec", "local"),
}
def update_state_dict(state_dict):
if "params" in state_dict and state_dict["params"] is not None:
return
keys = [
"spec_channels",
"segment_size",
"inter_channels",
"hidden_channels",
"filter_channels",
"n_heads",
"n_layers",
"kernel_size",
"p_dropout",
"resblock",
"resblock_kernel_sizes",
"resblock_dilation_sizes",
"upsample_rates",
"upsample_initial_channel",
"upsample_kernel_sizes",
"spk_embed_dim",
"gin_channels",
"emb_channels",
"sr",
]
state_dict["params"] = {}
n = 0
for i, key in enumerate(keys):
i = i - n
if len(state_dict["config"]) != 19 and key == "emb_channels":
# backward compat.
n += 1
continue
state_dict["params"][key] = state_dict["config"][i]
if not "emb_channels" in state_dict["params"]:
if state_dict.get("version", "v1") == "v1":
state_dict["params"]["emb_channels"] = 256 # for backward compat.
state_dict["embedder_output_layer"] = 9
else:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
state_dict["embedder_output_layer"] = 12
class VoiceConvertModel:
def __init__(self, model_name: str, state_dict: Dict[str, Any]) -> None:
update_state_dict(state_dict)
self.model_name = model_name
self.state_dict = state_dict
self.tgt_sr = state_dict["params"]["sr"]
f0 = state_dict.get("f0", 1)
state_dict["params"]["spk_embed_dim"] = state_dict["weight"][
"emb_g.weight"
].shape[0]
if not "emb_channels" in state_dict["params"]:
state_dict["params"]["emb_channels"] = 768 # for backward compat.
if f0 == 1:
self.net_g = SynthesizerTrnMs256NSFSid(
**state_dict["params"], is_half=is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFSidNono(**state_dict["params"])
del self.net_g.enc_q
self.net_g.load_state_dict(state_dict["weight"], strict=False) | self.net_g.eval().to(device) | 4 | 2023-10-28 01:58:49+00:00 | 12k |
baaivision/JudgeLM | judgelm/serve/multi_model_worker.py | [
{
"identifier": "WORKER_HEART_BEAT_INTERVAL",
"path": "judgelm/constants.py",
"snippet": "WORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"JUDGELM_WORKER_HEART_BEAT_INTERVAL\", 45))"
},
{
"identifier": "ErrorCode",
"path": "judgelm/constants.py",
"snippet": "class ErrorCode(IntEnum):\n \"\"\"\n https://platform.openai.com/docs/guides/error-codes/api-errors\n \"\"\"\n\n VALIDATION_TYPE_ERROR = 40001\n\n INVALID_AUTH_KEY = 40101\n INCORRECT_AUTH_KEY = 40102\n NO_PERMISSION = 40103\n\n INVALID_MODEL = 40301\n PARAM_OUT_OF_RANGE = 40302\n CONTEXT_OVERFLOW = 40303\n\n RATE_LIMIT = 42901\n QUOTA_EXCEEDED = 42902\n ENGINE_OVERLOADED = 42903\n\n INTERNAL_ERROR = 50001\n CUDA_OUT_OF_MEMORY = 50002\n GRADIO_REQUEST_ERROR = 50003\n GRADIO_STREAM_UNKNOWN_ERROR = 50004\n CONTROLLER_NO_WORKER = 50005\n CONTROLLER_WORKER_TIMEOUT = 50006"
},
{
"identifier": "SERVER_ERROR_MSG",
"path": "judgelm/constants.py",
"snippet": "SERVER_ERROR_MSG = (\n \"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\"\n)"
},
{
"identifier": "load_model",
"path": "judgelm/model/model_adapter.py",
"snippet": "def load_model(self, model_path: str, from_pretrained_kwargs: dict):\n revision = from_pretrained_kwargs.get(\"revision\", \"main\")\n try:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n use_fast=self.use_fast_tokenizer,\n revision=revision,\n )\n except TypeError:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n use_fast=False,\n revision=revision,\n )\n try:\n model = AutoModelForCausalLM.from_pretrained(\n model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs\n )\n except NameError:\n model = AutoModel.from_pretrained(\n model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs\n )\n return model, tokenizer"
},
{
"identifier": "add_model_args",
"path": "judgelm/model/model_adapter.py",
"snippet": "def add_model_args(parser):\n parser.add_argument(\n \"--model-path\",\n type=str,\n default=\"lmsys/vicuna-7b-v1.3\",\n help=\"The path to the weights. This can be a local folder or a Hugging Face repo ID.\",\n )\n parser.add_argument(\n \"--revision\",\n type=str,\n default=\"main\",\n help=\"Hugging Face Hub model revision identifier\",\n )\n parser.add_argument(\n \"--device\",\n type=str,\n choices=[\"cpu\", \"cuda\", \"mps\", \"xpu\"],\n default=\"cuda\",\n help=\"The device type\",\n )\n parser.add_argument(\n \"--gpus\",\n type=str,\n default=None,\n help=\"A single GPU like 1 or multiple GPUs like 0,2\",\n )\n parser.add_argument(\"--num-gpus\", type=int, default=1)\n parser.add_argument(\n \"--max-gpu-memory\",\n type=str,\n help=\"The maximum memory per gpu. Use a string like '13Gib'\",\n )\n parser.add_argument(\n \"--load-8bit\", action=\"store_true\", help=\"Use 8-bit quantization\"\n )\n parser.add_argument(\n \"--cpu-offloading\",\n action=\"store_true\",\n help=\"Only when using 8-bit quantization: Offload excess weights to the CPU that don't fit on the GPU\",\n )\n parser.add_argument(\n \"--gptq-ckpt\",\n type=str,\n default=None,\n help=\"Load quantized model. The path to the local GPTQ checkpoint.\",\n )\n parser.add_argument(\n \"--gptq-wbits\",\n type=int,\n default=16,\n choices=[2, 3, 4, 8, 16],\n help=\"#bits to use for quantization\",\n )\n parser.add_argument(\n \"--gptq-groupsize\",\n type=int,\n default=-1,\n help=\"Groupsize to use for quantization; default uses full row.\",\n )\n parser.add_argument(\n \"--gptq-act-order\",\n action=\"store_true\",\n help=\"Whether to apply the activation order GPTQ heuristic\",\n )"
},
{
"identifier": "get_conversation_template",
"path": "judgelm/model/model_adapter.py",
"snippet": "def get_conversation_template(model_path: str) -> Conversation:\n \"\"\"Get the default conversation template.\"\"\"\n adapter = get_model_adapter(model_path)\n return adapter.get_default_conv_template(model_path)"
},
{
"identifier": "generate_stream_chatglm",
"path": "judgelm/model/model_chatglm.py",
"snippet": "@torch.inference_mode()\ndef generate_stream_chatglm(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n echo = params.get(\"echo\", True)\n\n inputs = tokenizer([prompt], return_tensors=\"pt\").to(model.device)\n input_echo_len = len(inputs[\"input_ids\"][0])\n\n gen_kwargs = {\n \"max_length\": max_new_tokens + input_echo_len,\n \"do_sample\": True if temperature > 1e-5 else False,\n \"top_p\": top_p,\n \"repetition_penalty\": repetition_penalty,\n \"logits_processor\": [invalid_score_processor],\n }\n if temperature > 1e-5:\n gen_kwargs[\"temperature\"] = temperature\n\n total_len = 0\n for total_ids in model.stream_generate(**inputs, **gen_kwargs):\n total_ids = total_ids.tolist()[0]\n total_len = len(total_ids)\n if echo:\n output_ids = total_ids\n else:\n output_ids = total_ids[input_echo_len:]\n response = tokenizer.decode(output_ids)\n response = process_response(response)\n\n yield {\n \"text\": response,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": total_len - input_echo_len,\n \"total_tokens\": total_len,\n },\n \"finish_reason\": None,\n }\n\n # TODO: ChatGLM stop when it reach max length\n # Only last stream result contains finish_reason, we set finish_reason as stop\n ret = {\n \"text\": response,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": total_len - input_echo_len,\n \"total_tokens\": total_len,\n },\n \"finish_reason\": \"stop\",\n }\n yield ret"
},
{
"identifier": "generate_stream_falcon",
"path": "judgelm/model/model_falcon.py",
"snippet": "@torch.inference_mode()\ndef generate_stream_falcon(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n len_prompt = len(prompt)\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", 50)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n stop_str = params.get(\"stop\", None)\n echo = bool(params.get(\"echo\", True))\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n inputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n input_ids = inputs[\"input_ids\"]\n attention_mask = inputs[\"attention_mask\"]\n\n max_src_len = context_len - max_new_tokens - 8\n\n input_ids = input_ids[-max_src_len:] # truncate from the left\n attention_mask = attention_mask[-max_src_len:] # truncate from the left\n input_echo_len = len(input_ids)\n\n decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)\n streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, **decode_config)\n\n generation_config = GenerationConfig(\n max_new_tokens=max_new_tokens,\n do_sample=temperature >= 1e-5,\n temperature=temperature,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=10,\n top_p=top_p,\n top_k=top_k,\n eos_token_id=stop_token_ids,\n )\n\n generation_kwargs = dict(\n inputs=input_ids,\n attention_mask=attention_mask,\n streamer=streamer,\n generation_config=generation_config,\n )\n\n thread = Thread(target=model.generate, kwargs=generation_kwargs)\n thread.start()\n\n if echo:\n # means keep the prompt\n output = prompt\n else:\n output = \"\"\n\n for i, new_text in enumerate(streamer):\n output += new_text\n if i % stream_interval == 0:\n if echo:\n rfind_start = len_prompt\n else:\n rfind_start = 0\n\n partially_stopped = False\n if stop_str:\n if isinstance(stop_str, str):\n pos = output.rfind(stop_str, rfind_start)\n if pos != -1:\n output = output[:pos]\n else:\n partially_stopped = is_partial_stop(output, stop_str)\n elif isinstance(stop_str, Iterable):\n for each_stop in stop_str:\n pos = output.rfind(each_stop, rfind_start)\n if pos != -1:\n output = output[:pos]\n break\n else:\n partially_stopped = is_partial_stop(output, each_stop)\n if partially_stopped:\n break\n else:\n raise ValueError(\"Invalid stop field type.\")\n\n # prevent yielding partial stop sequence\n if not partially_stopped:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n output = output.strip()\n\n # finish stream event, which contains finish reason\n if i == max_new_tokens - 1:\n finish_reason = \"length\"\n elif partially_stopped:\n finish_reason = None\n else:\n finish_reason = \"stop\"\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n\n # clean\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()"
},
{
"identifier": "generate_stream_codet5p",
"path": "judgelm/model/model_codet5p.py",
"snippet": "@torch.inference_mode()\ndef generate_stream_codet5p(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", 50)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 1024))\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)\n streamer = TextIteratorStreamer(tokenizer, **decode_config)\n encoding = tokenizer(prompt, return_tensors=\"pt\").to(device)\n input_ids = encoding.input_ids\n encoding[\"decoder_input_ids\"] = encoding[\"input_ids\"].clone()\n input_echo_len = len(input_ids)\n\n generation_config = GenerationConfig(\n max_new_tokens=max_new_tokens,\n do_sample=temperature >= 1e-5,\n temperature=temperature,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=10,\n top_p=top_p,\n top_k=top_k,\n eos_token_id=stop_token_ids,\n )\n\n class CodeBlockStopper(StoppingCriteria):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs\n ) -> bool:\n # Code-completion is open-end generation.\n # We check \\n\\n to stop at end of a code block.\n if list(input_ids[0][-2:]) == [628, 198]:\n return True\n return False\n\n gen_kwargs = dict(\n **encoding,\n streamer=streamer,\n generation_config=generation_config,\n stopping_criteria=StoppingCriteriaList([CodeBlockStopper()]),\n )\n thread = Thread(target=model.generate, kwargs=gen_kwargs)\n thread.start()\n i = 0\n output = \"\"\n for new_text in streamer:\n i += 1\n output += new_text\n if i % stream_interval == 0 or i == max_new_tokens - 1:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n if i >= max_new_tokens:\n break\n\n if i >= max_new_tokens:\n finish_reason = \"length\"\n else:\n finish_reason = \"stop\"\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n thread.join()\n\n # clean\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()"
},
{
"identifier": "GptqConfig",
"path": "judgelm/modules/gptq.py",
"snippet": "class GptqConfig:\n ckpt: str = field(\n default=None,\n metadata={\n \"help\": \"Load quantized model. The path to the local GPTQ checkpoint.\"\n },\n )\n wbits: int = field(default=16, metadata={\"help\": \"#bits to use for quantization\"})\n groupsize: int = field(\n default=-1,\n metadata={\"help\": \"Groupsize to use for quantization; default uses full row.\"},\n )\n act_order: bool = field(\n default=True,\n metadata={\"help\": \"Whether to apply the activation order GPTQ heuristic\"},\n )"
},
{
"identifier": "generate_stream",
"path": "judgelm/serve/inference.py",
"snippet": "@torch.inference_mode()\ndef generate_stream(\n model,\n tokenizer,\n params: Dict,\n device: str,\n context_len: int,\n stream_interval: int = 2,\n judge_sent_end: bool = False,\n):\n # Read parameters\n prompt = params[\"prompt\"]\n len_prompt = len(prompt)\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", -1)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n echo = bool(params.get(\"echo\", True))\n stop_str = params.get(\"stop\", None)\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n logits_processor = prepare_logits_processor(\n temperature, repetition_penalty, top_p, top_k\n )\n input_ids = tokenizer(prompt).input_ids\n print(\"prompt is: \", prompt)\n\n if model.config.is_encoder_decoder:\n max_src_len = context_len\n else: # truncate\n max_src_len = context_len - max_new_tokens - 1\n\n input_ids = input_ids[-max_src_len:]\n output_ids = list(input_ids)\n input_echo_len = len(input_ids)\n\n if model.config.is_encoder_decoder:\n encoder_output = model.encoder(\n input_ids=torch.as_tensor([input_ids], device=device)\n )[0]\n start_ids = torch.as_tensor(\n [[model.generation_config.decoder_start_token_id]],\n dtype=torch.int64,\n device=device,\n )\n\n past_key_values = out = None\n sent_interrupt = False\n for i in range(max_new_tokens):\n if i == 0: # prefill\n if model.config.is_encoder_decoder:\n out = model.decoder(\n input_ids=start_ids,\n encoder_hidden_states=encoder_output,\n use_cache=True,\n )\n logits = model.lm_head(out[0])\n else:\n out = model(torch.as_tensor([input_ids], device=device), use_cache=True) # 初始的交互,输入的是聊天 prompt\n logits = out.logits\n past_key_values = out.past_key_values\n else: # decoding\n if model.config.is_encoder_decoder:\n out = model.decoder(\n input_ids=torch.as_tensor(\n [[token] if not sent_interrupt else output_ids], device=device\n ),\n encoder_hidden_states=encoder_output,\n use_cache=True,\n past_key_values=past_key_values if not sent_interrupt else None,\n )\n sent_interrupt = False\n\n logits = model.lm_head(out[0])\n else:\n out = model(\n input_ids=torch.as_tensor(\n [[token] if not sent_interrupt else output_ids], device=device\n ),\n use_cache=True,\n past_key_values=past_key_values if not sent_interrupt else None,\n )\n sent_interrupt = False\n logits = out.logits\n past_key_values = out.past_key_values\n\n if logits_processor:\n if repetition_penalty > 1.0:\n tmp_output_ids = torch.as_tensor([output_ids], device=logits.device)\n else:\n tmp_output_ids = None\n last_token_logits = logits_processor(tmp_output_ids, logits[:, -1, :])[0]\n else:\n last_token_logits = logits[0, -1, :]\n\n if device == \"mps\":\n # Switch to CPU by avoiding some bugs in mps backend.\n last_token_logits = last_token_logits.float().to(\"cpu\")\n\n if temperature < 1e-5 or top_p < 1e-8: # greedy\n _, indices = torch.topk(last_token_logits, 2)\n tokens = [int(index) for index in indices.tolist()]\n else:\n probs = torch.softmax(last_token_logits, dim=-1)\n indices = torch.multinomial(probs, num_samples=2)\n tokens = [int(token) for token in indices.tolist()]\n token = tokens[0]\n output_ids.append(token)\n\n if token in stop_token_ids:\n stopped = True\n else:\n stopped = False\n\n # Yield the output tokens\n if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:\n if echo:\n tmp_output_ids 
= output_ids\n rfind_start = len_prompt\n else:\n tmp_output_ids = output_ids[input_echo_len:]\n rfind_start = 0\n\n output = tokenizer.decode(\n tmp_output_ids,\n skip_special_tokens=True,\n spaces_between_special_tokens=False,\n clean_up_tokenization_spaces=True,\n )\n\n debug_output = tokenizer.decode(\n output_ids,\n skip_special_tokens=True,\n spaces_between_special_tokens=False,\n clean_up_tokenization_spaces=True,\n )\n print(debug_output)\n\n # TODO: For the issue of incomplete sentences interrupting output, apply a patch and others can also modify it to a more elegant way\n if judge_sent_end and stopped and not is_sentence_complete(output):\n if len(tokens) > 1:\n token = tokens[1]\n output_ids[-1] = token\n else:\n output_ids.pop()\n stopped = False\n sent_interrupt = True\n\n partially_stopped = False\n if stop_str:\n if isinstance(stop_str, str):\n pos = output.rfind(stop_str, rfind_start)\n if pos != -1:\n output = output[:pos]\n stopped = True\n else:\n partially_stopped = is_partial_stop(output, stop_str)\n elif isinstance(stop_str, Iterable):\n for each_stop in stop_str:\n pos = output.rfind(each_stop, rfind_start)\n if pos != -1:\n output = output[:pos]\n stopped = True\n break\n else:\n partially_stopped = is_partial_stop(output, each_stop)\n if partially_stopped:\n break\n else:\n raise ValueError(\"Invalid stop field type.\")\n\n # Prevent yielding partial stop sequence\n if not partially_stopped:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n\n if stopped:\n break\n\n # Finish stream event, which contains finish reason\n if i == max_new_tokens - 1:\n finish_reason = \"length\"\n elif stopped:\n finish_reason = \"stop\"\n else:\n finish_reason = None\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n\n # Clean\n del past_key_values, out\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()"
},
{
"identifier": "ModelWorker",
"path": "judgelm/serve/model_worker.py",
"snippet": "def heart_beat_worker(obj):\n def __init__(\n self,\n controller_addr: str,\n worker_addr: str,\n worker_id: str,\n model_path: str,\n model_names: List[str],\n limit_worker_concurrency: int,\n conv_template: str = None,\n ):\n def init_heart_beat(self):\n def register_to_controller(self):\n def send_heart_beat(self):\n def get_queue_length(self):\n def get_status(self):\n def count_token(self, params):\n def get_conv_template(self):\n def __init__(\n self,\n controller_addr: str,\n worker_addr: str,\n worker_id: str,\n model_path: str,\n model_names: List[str],\n limit_worker_concurrency: int,\n no_register: bool,\n device: str,\n num_gpus: int,\n max_gpu_memory: str,\n load_8bit: bool = False,\n cpu_offloading: bool = False,\n gptq_config: bool = None,\n stream_interval: int = 2,\n conv_template: str = None,\n ):\n def generate_stream_gate(self, params):\n def generate_gate(self, params):\n def get_embeddings(self, params):\ndef release_worker_semaphore():\ndef acquire_worker_semaphore():\ndef create_background_tasks():\nasync def api_generate_stream(request: Request):\nasync def api_generate(request: Request):\nasync def api_get_embeddings(request: Request):\nasync def api_get_status(request: Request):\nasync def api_count_token(request: Request):\nasync def api_get_conv(request: Request):\nasync def api_model_details(request: Request):\nclass BaseModelWorker:\nclass ModelWorker(BaseModelWorker):"
}
] | import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
import threading
import uuid
import requests
import torch
import torch.nn.functional as F
import uvicorn
from typing import List, Union
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
AutoModel,
)
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LLaMATokenizer,
AutoModel,
)
from judgelm.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from judgelm.model.model_adapter import (
load_model,
add_model_args,
get_conversation_template,
)
from judgelm.model.model_chatglm import generate_stream_chatglm
from judgelm.model.model_falcon import generate_stream_falcon
from judgelm.model.model_codet5p import generate_stream_codet5p
from judgelm.modules.gptq import GptqConfig
from judgelm.serve.inference import generate_stream
from judgelm.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length | 7,302 | # Note: for all the calls below, we make a hard assumption that the caller
# includes the model name in the payload, otherwise we can't figure out which
# underlying sub-worker to call.
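# Illustrative request shape (model name is hypothetical): the sub-worker is selected via
# worker_map[params["model"]], so a call such as
#   POST /worker_generate  with JSON {"model": "vicuna-7b-v1.3", "prompt": "...", ...}
# is routed to whichever worker was registered under that model name.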
@app.post("/worker_generate_stream")
async def api_generate_stream(request: Request):
params = await request.json()
await acquire_worker_semaphore()
worker = worker_map[params["model"]]
generator = worker.generate_stream_gate(params)
background_tasks = create_background_tasks()
return StreamingResponse(generator, background=background_tasks)
@app.post("/worker_generate")
async def api_generate(request: Request):
params = await request.json()
await acquire_worker_semaphore()
worker = worker_map[params["model"]]
output = worker.generate_gate(params)
release_worker_semaphore()
return JSONResponse(output)
@app.post("/worker_get_embeddings")
async def api_get_embeddings(request: Request):
params = await request.json()
await acquire_worker_semaphore()
worker = worker_map[params["model"]]
embedding = worker.get_embeddings(params)
background_tasks = create_background_tasks()
return JSONResponse(content=embedding, background=background_tasks)
@app.post("/worker_get_status")
async def api_get_status(request: Request):
return {
"model_names": [m for w in workers for m in w.model_names],
"speed": 1,
"queue_length": sum([w.get_queue_length() for w in workers]),
}
@app.post("/count_token")
async def api_count_token(request: Request):
params = await request.json()
worker = worker_map[params["model"]]
return worker.count_token(params)
@app.post("/worker_get_conv_template")
async def api_get_conv(request: Request):
params = await request.json()
worker = worker_map[params["model"]]
return worker.get_conv_template()
@app.post("/model_details")
async def api_model_details(request: Request):
params = await request.json()
worker = worker_map[params["model"]]
return {"context_length": worker.context_len}
if __name__ == "__main__":
# Note: Ensure we resolve arg conflicts. We let `add_model_args` add MOST
# of the model args but we'll override one to have an append action that
# supports multiple values.
parser = argparse.ArgumentParser(conflict_handler="resolve")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=21002)
parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
parser.add_argument(
"--controller-address", type=str, default="http://localhost:21001"
)
add_model_args(parser)
# Override the model path to be repeated and align it with model names.
parser.add_argument(
"--model-path",
type=str,
default=[],
action="append",
help="One or more paths to model weights to load. This can be a local folder or a Hugging Face repo ID.",
)
parser.add_argument(
"--model-names",
type=lambda s: s.split(","),
action="append",
help="One or more model names. Values must be aligned with `--model-path` values.",
)
parser.add_argument("--limit-worker-concurrency", type=int, default=5)
parser.add_argument("--stream-interval", type=int, default=2)
parser.add_argument("--no-register", action="store_true")
args = parser.parse_args()
logger.info(f"args: {args}")
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
gptq_config = GptqConfig(
ckpt=args.gptq_ckpt or args.model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
if args.model_names is None:
args.model_names = [[x.split("/")[-1]] for x in args.model_path]
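        # e.g. (hypothetical paths): --model-path models/vicuna-7b-v1.3 --model-path models/vicuna-7b-peft-a
        # with no --model-names yields [["vicuna-7b-v1.3"], ["vicuna-7b-peft-a"]].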
# Launch all workers
workers = []
for model_path, model_names in zip(args.model_path, args.model_names):
w = ModelWorker(
args.controller_address,
args.worker_address,
| """
A multi-model worker that contains multiple sub-workers, one for each model. This
supports running a list of models on the same machine so that they can
(potentially) share the same background weights.
Each model can have one or more model names.
This multi-model worker assumes the models share some underlying weights and
thus reports the combined queue lengths for health checks.
We recommend using this with multiple Peft models (with `peft` in the name)
where all Peft models are trained on the exact same base model.
"""
try:
    from transformers import (
        AutoTokenizer,
        AutoModelForCausalLM,
        LlamaTokenizer,
        AutoModel,
    )
except ImportError:
    from transformers import (
        AutoTokenizer,
        AutoModelForCausalLM,
        LLaMATokenizer,
        AutoModel,
    )
# We store both the underlying workers and a mapping from their model names to
# the worker instance. This makes it easy to fetch the appropriate worker for
# each API call.
workers = []
worker_map = {}
app = FastAPI()
def release_worker_semaphore():
workers[0].semaphore.release()
def acquire_worker_semaphore():
if workers[0].semaphore is None:
# Share the same semaphore for all workers because
# all workers share the same GPU.
semaphore = asyncio.Semaphore(workers[0].limit_worker_concurrency)
for w in workers:
w.semaphore = semaphore
return workers[0].semaphore.acquire()
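# The shared semaphore above is created lazily on the first request, so a single
# limit_worker_concurrency value caps in-flight requests across all sub-workers.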
def create_background_tasks():
background_tasks = BackgroundTasks()
background_tasks.add_task(release_worker_semaphore)
return background_tasks
# Note: for all the calls below, we make a hard assumption that the caller
# includes the model name in the payload, otherwise we can't figure out which
# underlying sub-worker to call.
@app.post("/worker_generate_stream")
async def api_generate_stream(request: Request):
params = await request.json()
await acquire_worker_semaphore()
worker = worker_map[params["model"]]
generator = worker.generate_stream_gate(params)
background_tasks = create_background_tasks()
return StreamingResponse(generator, background=background_tasks)
@app.post("/worker_generate")
async def api_generate(request: Request):
params = await request.json()
await acquire_worker_semaphore()
worker = worker_map[params["model"]]
output = worker.generate_gate(params)
release_worker_semaphore()
return JSONResponse(output)
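# Note the asymmetry between the two generate endpoints above: api_generate releases the
# shared semaphore inline once generate_gate() returns, while the streaming endpoint hands
# the release off to a background task so the semaphore stays held until the
# StreamingResponse has finished sending.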
@app.post("/worker_get_embeddings")
async def api_get_embeddings(request: Request):
params = await request.json()
await acquire_worker_semaphore()
worker = worker_map[params["model"]]
embedding = worker.get_embeddings(params)
background_tasks = create_background_tasks()
return JSONResponse(content=embedding, background=background_tasks)
@app.post("/worker_get_status")
async def api_get_status(request: Request):
return {
"model_names": [m for w in workers for m in w.model_names],
"speed": 1,
"queue_length": sum([w.get_queue_length() for w in workers]),
}
@app.post("/count_token")
async def api_count_token(request: Request):
params = await request.json()
worker = worker_map[params["model"]]
return worker.count_token(params)
@app.post("/worker_get_conv_template")
async def api_get_conv(request: Request):
params = await request.json()
worker = worker_map[params["model"]]
return worker.get_conv_template()
@app.post("/model_details")
async def api_model_details(request: Request):
params = await request.json()
worker = worker_map[params["model"]]
return {"context_length": worker.context_len}
if __name__ == "__main__":
# Note: Ensure we resolve arg conflicts. We let `add_model_args` add MOST
# of the model args but we'll override one to have an append action that
# supports multiple values.
parser = argparse.ArgumentParser(conflict_handler="resolve")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=21002)
parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
parser.add_argument(
"--controller-address", type=str, default="http://localhost:21001"
)
add_model_args(parser)
# Override the model path to be repeated and align it with model names.
parser.add_argument(
"--model-path",
type=str,
default=[],
action="append",
help="One or more paths to model weights to load. This can be a local folder or a Hugging Face repo ID.",
)
parser.add_argument(
"--model-names",
type=lambda s: s.split(","),
action="append",
help="One or more model names. Values must be aligned with `--model-path` values.",
)
parser.add_argument("--limit-worker-concurrency", type=int, default=5)
parser.add_argument("--stream-interval", type=int, default=2)
parser.add_argument("--no-register", action="store_true")
args = parser.parse_args()
logger.info(f"args: {args}")
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
gptq_config = GptqConfig(
ckpt=args.gptq_ckpt or args.model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
if args.model_names is None:
args.model_names = [[x.split("/")[-1]] for x in args.model_path]
# Launch all workers
workers = []
for model_path, model_names in zip(args.model_path, args.model_names):
w = ModelWorker(
args.controller_address,
args.worker_address, | worker_id, | 11 | 2023-10-26 19:41:07+00:00 | 12k |
EulerSearch/embedding_studio | embedding_studio/workers/fine_tuning/finetune_embedding.py | [
{
"identifier": "QueryRetriever",
"path": "embedding_studio/embeddings/data/clickstream/query_retriever.py",
"snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text\n 3. As a path to an image\n\n We provide an ability to use any query item. So, a user can specify any.\n\n \"\"\"\n\n def setup(self, clickstream_sessions: List[ClickstreamSession]):\n pass\n\n def __call__(self, query: QueryItem):\n return query"
},
{
"identifier": "RankingData",
"path": "embedding_studio/embeddings/data/ranking_data.py",
"snippet": "class RankingData:\n def __init__(self, clickstream: DatasetDict, items: DatasetDict):\n self.clickstream = clickstream\n self.items = items"
},
{
"identifier": "EmbeddingsModelInterface",
"path": "embedding_studio/embeddings/models/interface.py",
"snippet": "class EmbeddingsModelInterface(pl.LightningModule):\n def __init__(self, same_query_and_items: bool = False):\n \"\"\"In search we have two entities, which could be multi domain: query and search result (item).\n This is the interface we used in fine-tuning procedure.\n\n :param same_query_and_items: are query and items models acutally the same model (default: False)\n \"\"\"\n super(EmbeddingsModelInterface, self).__init__()\n self.same_query_and_items = same_query_and_items\n\n @abstractmethod\n def get_query_model_params(self) -> Iterator[Parameter]:\n pass\n\n @abstractmethod\n def get_items_model_params(self) -> Iterator[Parameter]:\n pass\n\n @abstractmethod\n def fix_query_model(self, num_fixed_layers: int):\n \"\"\"One of fine-tuning hyperparams is num of fixed layers at a query model\n\n :param num_fixed_layers: how many layers to fix\n \"\"\"\n\n @abstractmethod\n def unfix_query_model(self):\n \"\"\"Unfix all layers of a query model.\"\"\"\n\n @abstractmethod\n def fix_item_model(self, num_fixed_layers: int):\n \"\"\"One of fine-tuning hyperparams is num of fixed layers at an item model\n\n :param num_fixed_layers: how many layers to fix\n \"\"\"\n\n @abstractmethod\n def unfix_item_model(self):\n \"\"\"Unfix all layers of an item model.\"\"\"\n\n @abstractmethod\n def forward_query(self, query: Any) -> FloatTensor:\n pass\n\n @abstractmethod\n def forward_items(self, items: List[Any]) -> FloatTensor:\n pass"
},
{
"identifier": "ExperimentsManager",
"path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py",
"snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int = 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n 
wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if 
self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best 
model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n 
mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n 
logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n 
f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return self._get_best_quality(self._tuning_iteration_id)"
},
{
"identifier": "FineTuningIteration",
"path": "embedding_studio/workers/fine_tuning/experiments/finetuning_iteration.py",
"snippet": "class FineTuningIteration(BaseModel):\n \"\"\"Fine tuning iteration.\n\n :param batch_id: session batch id\n :param plugin_name: name of tuned embedding (default: \"\")\n \"\"\"\n\n batch_id: str = \"\"\n plugin_name: str = \"\"\n\n class Config:\n arbitrary_types_allowed = True\n\n def __str__(self) -> str:\n return (\n f\"{EXPERIMENT_PREFIX} / {self.plugin_name} / \" + f\"{self.batch_id}\"\n )"
},
{
"identifier": "FineTuningParams",
"path": "embedding_studio/workers/fine_tuning/experiments/finetuning_params.py",
"snippet": "class FineTuningParams(BaseModel):\n \"\"\"Params of fine-tuning procedure\n\n :param num_fixed_layers: number of fixed embeddings layers\n :param query_lr: learning rate of query model optimizer\n :param items_lr: learning rate of items model optimizer\n :param query_weight_decay: weight decay of query model optimizer\n :param items_weight_decay: weight decay of items model optimizer\n :param margin: margin from MarginRankingLoss\n :param not_irrelevant_only: use only not irrelevant sessions\n :param negative_downsampling: ratio of negative samples to be used\n :param min_abs_difference_threshold: filter out soft pairs abs(neg_dist - pos_dist) < small value (default: 0.0)\n :param max_abs_difference_threshold: filter out hard pairs abs(neg_dist - pos_dist) > huge value (default: 1.0)\n :param examples_order: order of passing examples to a trainer (default: None)\n \"\"\"\n\n num_fixed_layers: int\n query_lr: float\n items_lr: float\n query_weight_decay: float\n items_weight_decay: float\n margin: float\n not_irrelevant_only: bool\n negative_downsampling: float\n min_abs_difference_threshold: float = 0.0\n max_abs_difference_threshold: float = 1.0\n examples_order: List[ExamplesType] = [ExamplesType.all_examples]\n\n class Config:\n arbitrary_types_allowed = True\n\n @validator(\"examples_order\", pre=True, always=True)\n def validate_examples_order(cls, value):\n if isinstance(value, str):\n value = list(map(int, value.split(\",\")))\n elif isinstance(value, tuple):\n value = list(value)\n return [ExamplesType(v) for v in value]\n\n @validator(\"items_lr\", \"query_lr\", pre=True, always=True)\n def validate_positive_float(cls, value):\n if not (isinstance(value, float) and value > 0):\n raise ValueError(f\"{value} must be a positive float\")\n return value\n\n @validator(\n \"items_weight_decay\", \"query_weight_decay\", pre=True, always=True\n )\n def validate_non_negative_float(cls, value):\n if not (isinstance(value, float) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative float\")\n return value\n\n @validator(\"margin\", pre=True, always=True)\n def validate_non_negative_float_margin(cls, value):\n if not (isinstance(value, float) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative float\")\n return value\n\n @validator(\"num_fixed_layers\", pre=True, always=True)\n def validate_non_negative_int(cls, value):\n if not (isinstance(value, int) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative integer\")\n return value\n\n @root_validator(skip_on_failure=True)\n def validate_example_order(cls, values):\n examples_order = values.get(\"examples_order\")\n if examples_order:\n if isinstance(examples_order, str):\n examples_order = list(map(int, examples_order.split(\",\")))\n elif isinstance(examples_order, tuple):\n examples_order = list(examples_order)\n values[\"examples_order\"] = [\n ExamplesType(v) for v in examples_order\n ]\n return values\n\n @property\n def id(self) -> str:\n # Convert the value to bytes (assuming it's a string)\n value_bytes: bytes = str(self).encode(\"utf-8\")\n\n # Create a hash object\n hash_object = hashlib.sha256()\n\n # Update the hash object with the value\n hash_object.update(value_bytes)\n\n # Get the hexadecimal representation of the hash\n unique_id: str = hash_object.hexdigest()\n\n return unique_id\n\n def __str__(self) -> str:\n vals: List[str] = []\n for key, value in sorted(dict(self).items()):\n value = (\n \",\".join(map(str, value)) if isinstance(value, list) else value\n 
)\n vals.append(f\"{key}: {value}\")\n\n return \" / \".join(vals)"
},
{
"identifier": "FineTuningSettings",
"path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py",
"snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True"
},
{
"identifier": "fine_tune_embedding_model_one_param",
"path": "embedding_studio/workers/fine_tuning/finetune_embedding_one_param.py",
"snippet": "def fine_tune_embedding_model_one_param(\n initial_model: EmbeddingsModelInterface,\n settings: FineTuningSettings,\n ranking_data: RankingData,\n query_retriever: QueryRetriever,\n fine_tuning_params: FineTuningParams,\n tracker: ExperimentsManager,\n) -> float:\n \"\"\"Run embeddings fine-tuning over single fine-tuning params set\n\n :param initial_model: embedding model itself\n :param settings: fine-tuning settings\n :param ranking_data: dataset with clickstream and items\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :param fine_tuning_params: hyper params of fine-tuning task\n :param tracker: experiment management object\n :return: the best quality value\n \"\"\"\n use_cuda = torch.cuda.is_available()\n device = torch.device(\n \"cuda\" if use_cuda else \"cpu\"\n ) # TODO: use multiple devices\n\n if not use_cuda:\n logger.warning(\"No CUDA is available, use CPU device\")\n\n # Start run\n is_finished = tracker.set_run(fine_tuning_params)\n start_fine_tuning = True\n quality = None\n if is_finished:\n logger.warning(\n f\"Run with params {str(fine_tuning_params)} is finished.\"\n )\n # Read current embedding quality\n quality: Optional[float] = tracker.get_quality()\n _, best_quality = tracker.get_best_quality()\n if quality is not None and best_quality is not None:\n if quality < best_quality:\n start_fine_tuning = False\n logger.info(\n f\"Do not retry: Run with params {str(fine_tuning_params)} has not the best quality: {quality} < {best_quality}.\"\n )\n\n elif tracker.model_is_uploaded():\n start_fine_tuning = False\n logger.info(\n f\"Do not retry Run with params {str(fine_tuning_params)} has already had a model being uploaded.\"\n )\n\n if start_fine_tuning:\n logger.info(\"Init embeddings fine-tuner\")\n fine_tuner: EmbeddingsFineTuner = EmbeddingsFineTuner.create(\n initial_model,\n settings,\n ranking_data.items,\n query_retriever,\n fine_tuning_params,\n tracker,\n )\n logger.info(\"Trying to move to the device...\")\n fine_tuner.to(device)\n logger.info(\"Trying to move to the device... 
OK\")\n fine_tuner.preprocess_sessions(ranking_data.clickstream)\n\n # Init train / test clickstream data loaders\n train_dataloader: DataLoader = DataLoader(\n ranking_data.clickstream[\"train\"],\n batch_size=settings.batch_size,\n collate_fn=CustomDataCollator(),\n shuffle=True,\n )\n test_dataloader: DataLoader = DataLoader(\n ranking_data.clickstream[\"test\"],\n batch_size=1,\n collate_fn=CustomDataCollator(),\n shuffle=False,\n )\n\n # If val loss is not changing - stop training\n early_stop_callback: EarlyStopping = EarlyStopping(\n monitor=\"val_loss\",\n patience=3,\n strict=False,\n verbose=False,\n mode=\"min\",\n )\n\n logger.info(\"Start fine-tuning\")\n if 0 < settings.test_each_n_sessions <= 1:\n settings.test_each_n_sessions *= len(\n ranking_data.clickstream[\"train\"]\n )\n # Start fine-tuning\n trainer: Trainer = Trainer(\n max_epochs=settings.num_epochs,\n callbacks=[early_stop_callback],\n val_check_interval=int(\n settings.test_each_n_sessions\n if settings.test_each_n_sessions > 0\n else len(train_dataloader)\n ),\n )\n trainer.fit(fine_tuner, train_dataloader, test_dataloader)\n\n # Move model back to CPU\n fine_tuner.cpu()\n\n # Unfix layers\n initial_model.unfix_item_model()\n initial_model.unfix_query_model()\n\n # Read current embedding quality\n quality: Optional[float] = tracker.get_quality()\n logger.info(f\"Save model (best only, current quality: {quality})\")\n try:\n # Save model, best only\n tracker.save_model(initial_model, True)\n logger.info(\"Saving is finished\")\n except Exception as e:\n logger.exception(f\"Unable to save a model: {str(e)}\")\n\n tracker.finish_run()\n\n return quality"
}
] | import gc
import logging
import os
import tempfile
import traceback
import torch
from typing import Any, Dict, List, Optional
from hyperopt import Trials, fmin, hp, tpe
from embedding_studio.embeddings.data.clickstream.query_retriever import (
QueryRetriever,
)
from embedding_studio.embeddings.data.ranking_data import RankingData
from embedding_studio.embeddings.models.interface import (
EmbeddingsModelInterface,
)
from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_iteration import (
FineTuningIteration,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_params import (
FineTuningParams,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import (
FineTuningSettings,
)
from embedding_studio.workers.fine_tuning.finetune_embedding_one_param import (
fine_tune_embedding_model_one_param,
) | 10,176 |
logger = logging.getLogger(__name__)
def _finetune_embedding_model_one_step(
initial_model_path: str,
settings: FineTuningSettings,
ranking_data: RankingData,
query_retriever: QueryRetriever,
|
logger = logging.getLogger(__name__)
def _finetune_embedding_model_one_step(
initial_model_path: str,
settings: FineTuningSettings,
ranking_data: RankingData,
query_retriever: QueryRetriever, | fine_tuning_params: FineTuningParams, | 5 | 2023-10-31 00:33:13+00:00 | 12k |
facebookresearch/minimax | src/minimax/envs/maze/maze_ood.py | [
{
"identifier": "DIR_TO_VEC",
"path": "src/minimax/envs/maze/common.py",
"snippet": "DIR_TO_VEC = jnp.array([\n\t# Pointing right (positive X)\n\t(1, 0), # right\n\t(0, 1), # down\n\t(-1, 0), # left\n\t(0, -1), # up\n], dtype=jnp.int8)"
},
{
"identifier": "OBJECT_TO_INDEX",
"path": "src/minimax/envs/maze/common.py",
"snippet": "OBJECT_TO_INDEX = {\n\t\"unseen\": 0,\n\t\"empty\": 1,\n\t\"wall\": 2,\n\t\"floor\": 3,\n\t\"door\": 4,\n\t\"key\": 5,\n\t\"ball\": 6,\n\t\"box\": 7,\n\t\"goal\": 8,\n\t\"lava\": 9,\n\t\"agent\": 10,\n}"
},
{
"identifier": "COLOR_TO_INDEX",
"path": "src/minimax/envs/maze/common.py",
"snippet": "COLOR_TO_INDEX = {\n 'red' : 0,\n 'green' : 1,\n 'blue' : 2,\n 'purple': 3,\n 'yellow': 4,\n 'grey' : 5,\n}"
},
{
"identifier": "make_maze_map",
"path": "src/minimax/envs/maze/common.py",
"snippet": "def make_maze_map(\n\tparams,\n\twall_map, \n\tgoal_pos, \n\tagent_pos, \n\tagent_dir_idx,\n\tpad_obs=False):\n\t# Expand maze map to H x W x C\n\tempty = jnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\twall = jnp.array([OBJECT_TO_INDEX['wall'], COLOR_TO_INDEX['grey'], 0], dtype=jnp.uint8)\n\tmaze_map = jnp.array(jnp.expand_dims(wall_map, -1), dtype=jnp.uint8)\n\tmaze_map = jnp.where(maze_map > 0, wall, empty)\n\t\n\tagent = jnp.array([OBJECT_TO_INDEX['agent'], COLOR_TO_INDEX['red'], agent_dir_idx], dtype=jnp.uint8)\n\tagent_x,agent_y = agent_pos\n\tmaze_map = maze_map.at[agent_y,agent_x,:].set(agent)\n\n\tgoal = jnp.array([OBJECT_TO_INDEX['goal'], COLOR_TO_INDEX['green'], 0], dtype=jnp.uint8)\n\tgoal_x,goal_y = goal_pos\n\tmaze_map = maze_map.at[goal_y,goal_x,:].set(goal)\n\n\t# Add observation padding\n\tif pad_obs:\n\t\tpadding = params.agent_view_size-1\n\telse:\n\t\tpadding = 1\n\n\tmaze_map_padded = jnp.tile(wall.reshape((1,1,*empty.shape)), (maze_map.shape[0]+2*padding, maze_map.shape[1]+2*padding, 1))\n\tmaze_map_padded = maze_map_padded.at[padding:-padding,padding:-padding,:].set(maze_map)\n\n\t# Add surrounding walls\n\twall_start = padding-1 # start index for walls\n\twall_end_y = maze_map_padded.shape[0] - wall_start - 1\n\twall_end_x = maze_map_padded.shape[1] - wall_start - 1\n\tmaze_map_padded = maze_map_padded.at[wall_start,wall_start:wall_end_x+1,:].set(wall) # top\n\tmaze_map_padded = maze_map_padded.at[wall_end_y,wall_start:wall_end_x+1,:].set(wall) # bottom\n\tmaze_map_padded = maze_map_padded.at[wall_start:wall_end_y+1,wall_start,:].set(wall) # left\n\tmaze_map_padded = maze_map_padded.at[wall_start:wall_end_y+1,wall_end_x,:].set(wall) # right\n\n\treturn maze_map_padded"
},
{
"identifier": "Maze",
"path": "src/minimax/envs/maze/maze.py",
"snippet": "class Maze(environment.Environment):\n\tdef __init__(\n\t\tself,\n\t\theight=13,\n\t\twidth=13,\n\t\tn_walls=25,\n\t\tagent_view_size=5,\n\t\treplace_wall_pos=False,\n\t\tsee_through_walls=True,\n\t\tsee_agent=False,\n\t\tmax_episode_steps=250,\n\t\tnormalize_obs=False,\n\t\tsample_n_walls=False,\n\t\tobs_agent_pos=False,\n\t\tsingleton_seed=-1\n\t):\n\t\tsuper().__init__()\n\n\t\tself.obs_shape = (agent_view_size, agent_view_size, 3)\n\t\tself.action_set = jnp.array([\n\t\t\tActions.left,\n\t\t\tActions.right,\n\t\t\tActions.forward,\n\t\t\tActions.pickup,\n\t\t\tActions.drop,\n\t\t\tActions.toggle,\n\t\t\tActions.done\n\t\t])\n\n\t\tself.params = EnvParams(\n\t\t\theight=height,\n\t\t\twidth=width,\n\t\t\tn_walls=n_walls,\n\t\t\tagent_view_size=agent_view_size,\n\t\t\treplace_wall_pos=replace_wall_pos and not sample_n_walls,\n\t\t\tsee_through_walls=see_through_walls,\n\t\t\tsee_agent=see_agent,\n\t\t\tmax_episode_steps=max_episode_steps,\n\t\t\tnormalize_obs=normalize_obs,\n\t\t\tsample_n_walls=sample_n_walls,\n\t\t\tobs_agent_pos=obs_agent_pos,\n\t\t\tsingleton_seed=-1,\n\t\t)\n\n\t@property\n\tdef default_params(self) -> EnvParams:\n\t\t# Default environment parameters\n\t\treturn EnvParams()\n\n\tdef step_env(\n\t\tself,\n\t\tkey: chex.PRNGKey,\n\t\tstate: EnvState,\n\t\taction: int,\n\t) -> Tuple[chex.Array, EnvState, float, bool, dict]:\n\t\t\"\"\"Perform single timestep state transition.\"\"\"\n\t\ta = self.action_set[action]\n\t\tstate, reward = self.step_agent(key, state, a)\n\t\t# Check game condition & no. steps for termination condition\n\t\tstate = state.replace(time=state.time + 1)\n\t\tdone = self.is_terminal(state)\n\t\tstate = state.replace(terminal=done)\n\n\t\treturn (\n\t\t\tlax.stop_gradient(self.get_obs(state)),\n\t\t\tlax.stop_gradient(state),\n\t\t\treward.astype(jnp.float32),\n\t\t\tdone,\n\t\t\t{},\n\t\t)\n\n\tdef reset_env(\n\t\tself, \n\t\tkey: chex.PRNGKey, \n\t) -> Tuple[chex.Array, EnvState]:\n\t\t\"\"\"Reset environment state by resampling contents of maze_map\n\t\t- initial agent position\n\t\t- goal position\n\t\t- wall positions\n\t\t\"\"\"\n\t\tparams = self.params\n\t\th = params.height\n\t\tw = params.width\n\t\tall_pos = np.arange(np.prod([h, w]), dtype=jnp.uint32)\n\n\t\t# Reset wall map, with shape H x W, and value of 1 at (i,j) iff there is a wall at (i,j)\n\t\tkey, subkey = jax.random.split(key)\n\t\twall_idx = jax.random.choice(\n\t\t\tsubkey, all_pos, \n\t\t\tshape=(params.n_walls,), \n\t\t\treplace=params.replace_wall_pos)\n\n\t\tif params.sample_n_walls:\n\t\t\tkey, subkey = jax.random.split(key)\n\t\t\tsampled_n_walls = jax.random.randint(subkey, (), minval=0, maxval=params.n_walls)\n\t\t\tsample_wall_mask = jnp.arange(params.n_walls) < sampled_n_walls\n\t\t\tdummy_wall_idx = wall_idx.at[0].get().repeat(params.n_walls)\n\t\t\twall_idx = jax.lax.select(\n\t\t\t\tsample_wall_mask,\n\t\t\t\twall_idx,\n\t\t\t\tdummy_wall_idx\n\t\t\t)\n\n\t\toccupied_mask = jnp.zeros_like(all_pos)\n\t\toccupied_mask = occupied_mask.at[wall_idx].set(1)\n\t\twall_map = occupied_mask.reshape(h, w).astype(jnp.bool_)\n\n\t\t# Reset agent position + dir\n\t\tkey, subkey = jax.random.split(key)\n\t\tagent_idx = jax.random.choice(subkey, all_pos, shape=(1,), p=(~occupied_mask.astype(jnp.bool_)).astype(jnp.float32))\n\t\toccupied_mask = occupied_mask.at[agent_idx].set(1)\n\t\tagent_pos = jnp.array([agent_idx%w,agent_idx//w], dtype=jnp.uint32).flatten()\n\n\t\tkey, subkey = jax.random.split(key)\n\t\tagent_dir_idx = jax.random.choice(subkey, 
jnp.arange(len(DIR_TO_VEC), dtype=jnp.uint8))\n\t\tagent_dir = DIR_TO_VEC.at[agent_dir_idx].get()\n\n\t\t# Reset goal position\n\t\tkey, subkey = jax.random.split(key)\n\t\tgoal_idx = jax.random.choice(subkey, all_pos, shape=(1,), p=(~occupied_mask.astype(jnp.bool_)).astype(jnp.float32))\n\t\tgoal_pos = jnp.array([goal_idx%w,goal_idx//w], dtype=jnp.uint32).flatten()\n\n\t\tmaze_map = make_maze_map(\n\t\t\tparams,\n\t\t\twall_map, \n\t\t\tgoal_pos, \n\t\t\tagent_pos, \n\t\t\tagent_dir_idx, \n\t\t\tpad_obs=True)\n\n\t\tstate = EnvState(\n\t\t\tagent_pos=agent_pos,\n\t\t\tagent_dir=agent_dir,\n\t\t\tagent_dir_idx=agent_dir_idx,\n\t\t\tgoal_pos=goal_pos,\n\t\t\twall_map=wall_map.astype(jnp.bool_),\n\t\t\tmaze_map=maze_map,\n\t\t\ttime=0,\n\t\t\tterminal=False,\n\t\t)\n\n\t\treturn self.get_obs(state), state\n\n\tdef set_env_instance(\n\t\t\tself, \n\t\t\tencoding: EnvInstance):\n\t\t\"\"\"\n\t\tInstance is encoded as a PyTree containing the following fields:\n\t\tagent_pos, agent_dir, goal_pos, wall_map\n\t\t\"\"\"\n\t\tparams = self.params\n\t\tagent_pos = encoding.agent_pos\n\t\tagent_dir_idx = encoding.agent_dir_idx\n\n\t\tagent_dir = DIR_TO_VEC.at[agent_dir_idx].get()\n\t\tgoal_pos = encoding.goal_pos\n\t\twall_map = encoding.wall_map\n\t\tmaze_map = make_maze_map(\n\t\t\tparams,\n\t\t\twall_map, \n\t\t\tgoal_pos, \n\t\t\tagent_pos, \n\t\t\tagent_dir_idx, # ued instances include wall padding\n\t\t\tpad_obs=True)\n\n\t\tstate = EnvState(\n\t\t\tagent_pos=agent_pos,\n\t\t\tagent_dir=agent_dir,\n\t\t\tagent_dir_idx=agent_dir_idx,\n\t\t\tgoal_pos=goal_pos,\n\t\t\twall_map=wall_map,\n\t\t\tmaze_map=maze_map,\n\t\t\ttime=0,\n\t\t\tterminal=False\n\t\t)\n\n\t\treturn self.get_obs(state), state\n\n\tdef get_obs(self, state: EnvState) -> chex.Array:\n\t\t\"\"\"Return limited grid view ahead of agent.\"\"\"\n\t\tobs = jnp.zeros(self.obs_shape, dtype=jnp.uint8)\n\t\t\n\t\tagent_x, agent_y = state.agent_pos\n\n\t\tobs_fwd_bound1 = state.agent_pos\n\t\tobs_fwd_bound2 = state.agent_pos + state.agent_dir*(self.obs_shape[0]-1)\n\n\t\tside_offset = self.obs_shape[0]//2\n\t\tobs_side_bound1 = state.agent_pos + (state.agent_dir == 0)*side_offset\n\t\tobs_side_bound2 = state.agent_pos - (state.agent_dir == 0)*side_offset\n\n\t\tall_bounds = jnp.stack([obs_fwd_bound1, obs_fwd_bound2, obs_side_bound1, obs_side_bound2])\n\n\t\t# Clip obs to grid bounds appropriately\n\t\tpadding = obs.shape[0]-1\n\t\tobs_bounds_min = np.min(all_bounds, 0) + padding\n\t\tobs_range_x = jnp.arange(obs.shape[0]) + obs_bounds_min[1]\n\t\tobs_range_y = jnp.arange(obs.shape[0]) + obs_bounds_min[0]\n\n\t\tmeshgrid = jnp.meshgrid(obs_range_y,obs_range_x)\n\t\tcoord_y = meshgrid[1].flatten()\n\t\tcoord_x = meshgrid[0].flatten()\n\n\t\tobs = state.maze_map.at[\n\t\t\tcoord_y,coord_x,:].get().reshape(obs.shape[0],obs.shape[1],3)\n\n\t\tobs = (state.agent_dir_idx == 0)*jnp.rot90(obs, 1) + \\\n\t\t\t \t(state.agent_dir_idx == 1)*jnp.rot90(obs, 2) + \\\n\t\t\t \t(state.agent_dir_idx == 2)*jnp.rot90(obs, 3) + \\\n\t\t\t \t(state.agent_dir_idx == 3)*jnp.rot90(obs, 4)\n\n\t\tif not self.params.see_agent:\n\t\t\tobs = obs.at[-1, side_offset].set(\n\t\t\t\tjnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\t\t\t)\n\n\t\tif not self.params.see_through_walls:\n\t\t\tpass\n\n\t\timage = obs.astype(jnp.uint8)\n\t\tif self.params.normalize_obs:\n\t\t\timage = image/10.0\n\n\t\tobs_dict = dict(\n\t\t\timage=image,\n\t\t\tagent_dir=state.agent_dir_idx\n\t\t)\n\t\tif 
self.params.obs_agent_pos:\n\t\t\tobs_dict.update(dict(agent_pos=state.agent_pos))\n\n\t\treturn OrderedDict(obs_dict)\n\n\tdef step_agent(self, key: chex.PRNGKey, state: EnvState, action: int) -> Tuple[EnvState, float]:\n\t\tparams = self.params\n\n\t\t# Update agent position (forward action)\n\t\tfwd_pos = jnp.minimum(\n\t\t\tjnp.maximum(state.agent_pos + (action == Actions.forward)*state.agent_dir, 0), \n\t\t\tjnp.array((params.width-1, params.height-1), dtype=jnp.uint32))\n\n\t\t# Can't go past wall or goal\n\t\tfwd_pos_has_wall = state.wall_map.at[fwd_pos[1], fwd_pos[0]].get()\n\t\tfwd_pos_has_goal = jnp.logical_and(fwd_pos[0] == state.goal_pos[0], fwd_pos[1] == state.goal_pos[1])\n\n\t\tfwd_pos_blocked = jnp.logical_or(fwd_pos_has_wall, fwd_pos_has_goal)\n\n\t\tagent_pos_prev = jnp.array(state.agent_pos)\n\t\tagent_pos = (fwd_pos_blocked*state.agent_pos + (~fwd_pos_blocked)*fwd_pos).astype(jnp.uint32)\n\n\t\t# Update agent direction (left_turn or right_turn action)\n\t\tagent_dir_offset = \\\n\t\t\t0 \\\n\t\t\t+ (action == Actions.left)*(-1) \\\n\t\t\t+ (action == Actions.right)*1\n\n\t\tagent_dir_idx = (state.agent_dir_idx + agent_dir_offset) % 4\n\t\tagent_dir = DIR_TO_VEC[agent_dir_idx]\n\n\t\t# Update agent component in maze_map\n\t\tempty = jnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\t\tagent = jnp.array([OBJECT_TO_INDEX['agent'], COLOR_TO_INDEX['red'], agent_dir_idx], dtype=jnp.uint8)\n\t\tpadding = self.obs_shape[0]-1\n\t\tmaze_map = state.maze_map\n\t\tmaze_map = maze_map.at[padding+agent_pos_prev[1],padding+agent_pos_prev[0],:].set(empty)\n\t\tmaze_map = maze_map.at[padding+agent_pos[1],padding+agent_pos[0],:].set(agent)\n\n\t\t# Return reward\n\t\t# rng = jax.random.PRNGKey(agent_dir_idx + agent_pos[0] + agent_pos[1])\n\t\t# rand_reward = jax.random.uniform(rng)\n\t\treward = (1.0 - 0.9*((state.time+1)/params.max_episode_steps))*fwd_pos_has_goal # rand_reward\n\n\t\treturn (\n\t\t\tstate.replace(\n\t\t\t\tagent_pos=agent_pos,\n\t\t\t\tagent_dir_idx=agent_dir_idx,\n\t\t\t\tagent_dir=agent_dir,\n\t\t\t\tmaze_map=maze_map,\t\n\t\t\t\tterminal=fwd_pos_has_goal),\n\t\t\treward\n\t\t)\n\n\tdef is_terminal(self, state: EnvState) -> bool:\n\t\t\"\"\"Check whether state is terminal.\"\"\"\n\t\tdone_steps = state.time >= self.params.max_episode_steps\n\t\treturn jnp.logical_or(done_steps, state.terminal)\n\n\tdef get_eval_solved_rate_fn(self):\n\t\tdef _fn(ep_stats):\n\t\t\treturn ep_stats['return'] > 0\n\n\t\treturn _fn\n\n\t@property\n\tdef name(self) -> str:\n\t\t\"\"\"Environment name.\"\"\"\n\t\treturn \"Maze\"\n\n\t@property\n\tdef num_actions(self) -> int:\n\t\t\"\"\"Number of actions possible in environment.\"\"\"\n\t\treturn len(self.action_set)\n\n\tdef action_space(self) -> spaces.Discrete:\n\t\t\"\"\"Action space of the environment.\"\"\"\n\t\treturn spaces.Discrete(\n\t\t\tlen(self.action_set),\n\t\t\tdtype=jnp.uint32\n\t\t)\n\n\tdef observation_space(self) -> spaces.Dict:\n\t\t\"\"\"Observation space of the environment.\"\"\"\n\t\tspaces_dict = {\n\t\t\t'image':spaces.Box(0, 255, self.obs_shape),\n\t\t\t'agent_dir': spaces.Discrete(4)\n\t\t}\n\t\tif self.params.obs_agent_pos:\n\t\t\tparams = self.params\n\t\t\th = params.height\n\t\t\tw = params.width\n\t\t\tspaces_dict.update({'agent_pos': spaces.Box(0, max(w, h), (2,), dtype=jnp.uint32)})\n\n\t\treturn spaces.Dict(spaces_dict)\n\n\tdef state_space(self) -> spaces.Dict:\n\t\t\"\"\"State space of the environment.\"\"\"\n\t\tparams = self.params\n\t\th = params.height\n\t\tw = 
params.width\n\t\tagent_view_size = params.agent_view_size\n\t\treturn spaces.Dict({\n\t\t\t\"agent_pos\": spaces.Box(0, max(w, h), (2,), dtype=jnp.uint32),\n\t\t\t\"agent_dir\": spaces.Discrete(4),\n\t\t\t\"goal_pos\": spaces.Box(0, max(w, h), (2,), dtype=jnp.uint32),\n\t\t\t\"maze_map\": spaces.Box(0, 255, (w + agent_view_size, h + agent_view_size, 3), dtype=jnp.uint32),\n\t\t\t\"time\": spaces.Discrete(params.max_episode_steps),\n\t\t\t\"terminal\": spaces.Discrete(2),\n\t\t})\n\n\tdef max_episode_steps(self) -> int:\n\t\treturn self.params.max_episode_steps\n\n\tdef get_env_metrics(self, state: EnvState) -> dict:\n\t\tn_walls = state.wall_map.sum()\n\t\tshortest_path_length = _graph_util.shortest_path_len(\n\t\t\tstate.wall_map,\n\t\t\tstate.agent_pos,\n\t\t\tstate.goal_pos\n\t\t)\n\n\t\treturn dict(\n\t\t\tn_walls=n_walls,\n\t\t\tshortest_path_length=shortest_path_length,\n\t\t\tpassable=shortest_path_length > 0,\n\t\t)"
},
{
"identifier": "EnvParams",
"path": "src/minimax/envs/maze/maze.py",
"snippet": "class EnvParams:\n\theight: int = 15\n\twidth: int = 15\n\tn_walls: int = 25 \n\tagent_view_size: int = 5\n\treplace_wall_pos: bool = False\n\tsee_through_walls: bool = True\n\tsee_agent: bool = False\n\tnormalize_obs: bool = False\n\tsample_n_walls: bool = False # Sample n_walls uniformly in [0, n_walls]\n\tobs_agent_pos: bool = False\n\tmax_episode_steps: int = 250\n\tsingleton_seed: int = -1,"
},
{
"identifier": "EnvState",
"path": "src/minimax/envs/maze/maze.py",
"snippet": "class EnvState:\n\tagent_pos: chex.Array\n\tagent_dir: chex.Array\n\tagent_dir_idx: int\n\tgoal_pos: chex.Array\n\twall_map: chex.Array\n\tmaze_map: chex.Array\n\ttime: int\n\tterminal: bool"
},
{
"identifier": "Actions",
"path": "src/minimax/envs/maze/maze.py",
"snippet": "class Actions(IntEnum):\n # Turn left, turn right, move forward\n left = 0\n right = 1\n forward = 2\n\n # Pick up an object\n pickup = 3\n # Drop an object\n drop = 4\n # Toggle/activate an object\n toggle = 5\n\n # Done completing task\n done = 6"
}
] | from typing import Tuple, Optional
from flax import struct
from minimax.envs.registration import register
from .common import (
DIR_TO_VEC,
OBJECT_TO_INDEX,
COLOR_TO_INDEX,
make_maze_map,
)
from .maze import (
Maze,
EnvParams,
EnvState,
Actions
)
import jax
import jax.numpy as jnp
import chex | 7,859 | (wall_map, visited_map, vstack, vstack_size), _ = jax.lax.scan(
_scan_step,
(wall_map, visited_map, vstack, vstack_size),
jnp.array(subkeys),
length=max_n_steps
)
# Randomize goal position
all_pos_idx = jnp.arange(height*width)
key, subkey = jax.random.split(key)
goal_mask = ~wall_map.flatten()
goal_pos_idx = jax.random.choice(subkey, all_pos_idx, p=goal_mask)
goal_pos = jnp.array([goal_pos_idx%width, goal_pos_idx//width])
# Randomize agent position
key, subkey = jax.random.split(key)
agent_mask = goal_mask.at[goal_pos_idx].set(False)
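		# Sample the agent position from free cells, excluding the goal cell masked out above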
agent_pos_idx = jax.random.choice(subkey, all_pos_idx, p=agent_mask)
agent_pos = jnp.array([agent_pos_idx%width, agent_pos_idx//width], dtype=jnp.uint32)
# Randomize agent dir
key, subkey = jax.random.split(key)
agent_dir_idx = jax.random.choice(subkey, 4)
maze_map = make_maze_map(
self.params,
wall_map,
goal_pos,
agent_pos,
agent_dir_idx,
pad_obs=True)
state = EnvState(
agent_pos=agent_pos,
agent_dir=DIR_TO_VEC[agent_dir_idx],
agent_dir_idx=agent_dir_idx,
goal_pos=goal_pos,
wall_map=wall_map,
maze_map=maze_map,
time=0,
terminal=False,
)
return self.get_obs(state), state
class PerfectMazeMedium(PerfectMaze):
def __init__(self, *args, **kwargs):
super().__init__(height=19, width=19, *args, **kwargs)
class PerfectMazeExtraLarge(PerfectMaze):
def __init__(self, *args, **kwargs):
super().__init__(height=101, width=101, *args, **kwargs)
class Memory(MazeSingleton):
def __init__(
self,
height=17,
width=17,
agent_view_size=7,
see_through_walls=True,
see_agent=False,
normalize_obs=False,
obs_agent_pos=False,
max_episode_steps=250,
singleton_seed=-1):
# Generate walls
wall_map = [
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"1 1 1 1 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 1 1 1 1 1 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
"0 0 0 1 1 1 1 1 1 0 1 0 0 0 0",
"1 1 1 1 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0"
]
super().__init__(
wall_map=wall_map,
goal_pos=(9,5),
agent_pos=(0,7),
agent_dir_idx=0,
see_agent=see_agent,
normalize_obs=normalize_obs,
obs_agent_pos=obs_agent_pos,
max_episode_steps=max_episode_steps
)
self.top_pos = jnp.array([9,5], dtype=jnp.uint32)
self.bottom_pos = jnp.array([9,9], dtype=jnp.uint32)
def reset_env(
self,
key: chex.PRNGKey,
) -> Tuple[chex.Array, EnvState]:
params = self.params
height, width = params.height, params.width
agent_pos = jnp.array([0,7], dtype=jnp.uint32)
agent_dir_idx = 0
# Randomly generate a memory location
is_top_goal = jax.random.randint(key, minval=0, maxval=2, shape=(1,), dtype=jnp.uint8)
clue_pos = jnp.array((0,6), dtype=jnp.uint32)
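		# The coin flip above decides which corridor end receives the goal; the other end becomes the distractor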
self.goal_pos = is_top_goal*self.top_pos + (1-is_top_goal)*self.bottom_pos
self.distractor_pos = is_top_goal*self.bottom_pos + (1-is_top_goal)*self.top_pos
| """
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
# ======== Singleton mazes ========
class MazeSingleton(Maze):
def __init__(
self,
height=15,
width=15,
wall_map=None,
goal_pos=None,
agent_pos=None,
agent_dir_idx=None,
agent_view_size=5,
see_through_walls=True,
see_agent=False,
normalize_obs=False,
obs_agent_pos=False,
max_episode_steps=None,
singleton_seed=-1,
):
super().__init__(
height=height,
width=width,
agent_view_size=agent_view_size,
see_through_walls=see_through_walls,
see_agent=see_agent,
normalize_obs=normalize_obs,
obs_agent_pos=obs_agent_pos,
max_episode_steps=max_episode_steps,
singleton_seed=singleton_seed
)
if wall_map is None:
self.wall_map = jnp.zeros((height,width), dtype=jnp.bool_)
else:
self.wall_map = \
jnp.array(
[[int(x) for x in row.split()]
for row in wall_map], dtype=jnp.bool_)
height, width = self.wall_map.shape
if max_episode_steps is None:
max_episode_steps = 2*(height+2)*(width+2) # Match original eval steps
self.goal_pos_choices = None
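		# goal_pos may be a single (x, y) position or a list of candidate positions sampled at each reset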
if goal_pos is None:
self.goal_pos = jnp.array([height, width]) - jnp.ones(2, dtype=jnp.uint32)
elif isinstance(goal_pos, (tuple, list)) \
and isinstance(goal_pos[0], (tuple, list)):
self.goal_pos_choices = jnp.array(goal_pos, dtype=jnp.uint32)
self.goal_pos = goal_pos[0]
else:
self.goal_pos = jnp.array(goal_pos, dtype=jnp.uint32)
if agent_pos is None:
self.agent_pos = jnp.zeros(2, dtype=jnp.uint32)
else:
self.agent_pos = jnp.array(agent_pos, dtype=jnp.uint32)
self.agent_dir_idx = agent_dir_idx
if self.agent_dir_idx is None:
self.agent_dir_idx = 0
self.params = EnvParams(
height=height,
width=width,
agent_view_size=agent_view_size,
see_through_walls=see_through_walls,
see_agent=see_agent,
normalize_obs=normalize_obs,
obs_agent_pos=obs_agent_pos,
max_episode_steps=max_episode_steps,
singleton_seed=-1,
)
self.maze_map = make_maze_map(
self.params,
self.wall_map,
self.goal_pos,
self.agent_pos,
self.agent_dir_idx,
pad_obs=True)
@property
def default_params(self) -> EnvParams:
# Default environment parameters
return EnvParams()
def reset_env(
self,
key: chex.PRNGKey,
) -> Tuple[chex.Array, EnvState]:
if self.agent_dir_idx is None:
key, subkey = jax.random.split(key)
agent_dir_idx = jax.random.choice(subkey, 4)
else:
agent_dir_idx = self.agent_dir_idx
if self.goal_pos_choices is not None:
key, subkey = jax.random.split(key)
goal_pos = jax.random.choice(subkey, self.goal_pos_choices)
maze_map = make_maze_map(
self.params,
self.wall_map,
goal_pos,
self.agent_pos,
agent_dir_idx,
pad_obs=True)
else:
goal_pos = self.goal_pos
maze_map = self.maze_map
state = EnvState(
agent_pos=self.agent_pos,
agent_dir=DIR_TO_VEC[agent_dir_idx],
agent_dir_idx=agent_dir_idx,
goal_pos=goal_pos,
wall_map=self.wall_map,
maze_map=maze_map,
time=0,
terminal=False,
)
return self.get_obs(state), state
# ======== Specific mazes ========
class SixteenRooms(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 1 0 0 1 0 0 1 0 0 0",
"0 0 0 0 0 0 0 0 0 1 0 0 0",
"0 0 0 1 0 0 1 0 0 0 0 0 0",
"1 0 1 1 1 0 1 1 0 1 1 1 0",
"0 0 0 1 0 0 0 0 0 0 0 0 0",
"0 0 0 0 0 0 1 0 0 1 0 0 0",
"1 1 0 1 0 1 1 0 1 1 1 0 1",
"0 0 0 1 0 0 0 0 0 1 0 0 0",
"0 0 0 1 0 0 1 0 0 0 0 0 0",
"0 1 1 1 1 0 1 1 0 1 0 1 1",
"0 0 0 1 0 0 1 0 0 1 0 0 0",
"0 0 0 0 0 0 1 0 0 0 0 0 0",
"0 0 0 1 0 0 0 0 0 1 0 0 0"
]
goal_pos = (11,11)
agent_pos = (1,1)
agent_dir_idx = 0
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class SixteenRooms2(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 1 0 0 0 0 0 1 0 0 0",
"0 0 0 0 0 0 1 0 0 1 0 0 0",
"0 0 0 1 0 0 1 0 0 1 0 0 0",
"1 1 1 1 0 1 1 0 1 1 1 0 1",
"0 0 0 1 0 0 1 0 0 0 0 0 0",
"0 0 0 0 0 0 1 0 0 1 0 0 0",
"1 0 1 1 1 1 1 0 1 1 1 1 1",
"0 0 0 1 0 0 1 0 0 1 0 0 0",
"0 0 0 1 0 0 0 0 0 0 0 0 0",
"1 1 0 1 1 0 1 1 0 1 1 1 1",
"0 0 0 1 0 0 1 0 0 1 0 0 0",
"0 0 0 0 0 0 1 0 0 0 0 0 0",
"0 0 0 1 0 0 1 0 0 1 0 0 0"
]
goal_pos = (11,11)
agent_pos = (1,1)
agent_dir_idx = None
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class Labyrinth(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 0 0 0 0 0 0 0 0 0 0",
"0 1 1 1 1 1 1 1 1 1 1 1 0",
"0 1 0 0 0 0 0 0 0 0 0 1 0",
"0 1 0 1 1 1 1 1 1 1 0 1 0",
"0 1 0 1 0 0 0 0 0 1 0 1 0",
"0 1 0 1 0 1 1 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 0 0 1 0 0 0 1 0 1 0",
"0 1 1 1 1 1 1 1 1 1 0 1 0",
"0 0 0 0 0 1 0 0 0 0 0 1 0",
"1 1 1 1 0 1 0 1 1 1 1 1 0",
"0 0 0 0 0 1 0 0 0 0 0 0 0"
]
goal_pos = (6,6)
agent_pos = (0,12)
agent_dir_idx = 0
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class LabyrinthFlipped(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
'0 0 0 0 0 0 0 0 0 0 0 0 0',
'0 1 1 1 1 1 1 1 1 1 1 1 0',
'0 1 0 0 0 0 0 0 0 0 0 1 0',
'0 1 0 1 1 1 1 1 1 1 0 1 0',
'0 1 0 1 0 0 0 0 0 1 0 1 0',
'0 1 0 1 0 1 1 1 0 1 0 1 0',
'0 1 0 1 0 1 0 1 0 1 0 1 0',
'0 1 0 1 0 1 0 1 0 1 0 1 0',
'0 1 0 1 0 0 0 1 0 0 0 1 0',
'0 1 0 1 1 1 1 1 1 1 1 1 0',
'0 1 0 0 0 0 0 1 0 0 0 0 0',
'0 1 1 1 1 1 0 1 0 1 1 1 1',
'0 0 0 0 0 0 0 1 0 0 0 0 0'
]
goal_pos = (6,6)
agent_pos = (12,12)
agent_dir_idx = 2
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class Labyrinth2(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 1 0 0 0 0 0 0 0 0 0 0 0",
"0 1 0 1 1 1 1 1 1 1 1 1 0",
"0 1 0 1 0 0 0 0 0 0 0 1 0",
"0 1 0 1 0 1 1 1 1 1 0 1 0",
"0 1 0 1 0 1 0 0 0 1 0 1 0",
"0 0 0 1 0 1 0 1 0 1 0 1 0",
"1 1 1 1 0 1 0 1 0 1 0 1 0",
"0 0 0 1 0 1 1 1 0 1 0 1 0",
"0 1 0 1 0 0 0 0 0 1 0 1 0",
"0 1 0 1 1 1 1 1 1 1 0 1 0",
"0 1 0 0 0 0 0 0 0 0 0 1 0",
"0 1 1 1 1 1 1 1 1 1 1 1 0",
"0 0 0 0 0 0 0 0 0 0 0 0 0"
]
goal_pos = (6,6)
agent_pos = (0,0)
agent_dir_idx = None
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class StandardMaze(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 0 0 1 0 0 0 0 1 0 0",
"0 1 1 1 0 1 1 1 1 0 1 1 0",
"0 1 0 0 0 0 0 0 0 0 0 0 0",
"0 1 1 1 1 1 1 1 1 0 1 1 1",
"0 0 0 0 0 0 0 0 1 0 0 0 0",
"1 1 1 1 1 1 0 1 1 1 1 1 0",
"0 0 0 0 1 0 0 1 0 0 0 0 0",
"0 1 1 0 0 0 1 1 0 1 1 1 1",
"0 0 1 0 1 0 0 1 0 0 0 1 0",
"1 0 1 0 1 1 0 1 1 1 0 1 0",
"1 0 1 0 0 1 0 0 0 1 0 0 0",
"1 0 1 1 0 1 1 1 0 1 1 1 0",
"0 0 0 1 0 0 0 1 0 1 0 0 0"
]
goal_pos = (6,12)
agent_pos = (6,0)
agent_dir_idx = 0
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class StandardMaze2(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 1 0 1 0 0 0 0 1 0 0",
"0 1 0 1 0 1 1 1 1 0 0 0 1",
"0 1 0 0 0 0 0 0 0 0 1 0 0",
"0 1 1 1 1 1 1 1 1 0 1 1 1",
"0 0 0 1 0 0 1 0 1 0 1 0 0",
"1 1 0 1 0 1 1 0 1 0 1 0 0",
"0 1 0 1 0 0 0 0 1 0 1 1 0",
"0 1 0 1 1 0 1 1 1 0 0 1 0",
"0 1 0 0 1 0 0 1 1 1 0 1 0",
"0 1 1 0 1 1 0 1 0 1 0 1 0",
"0 1 0 0 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 0 0 1 0 0 0 1 0 0 0 0 0"
]
goal_pos = (12,4)
agent_pos = (0,6)
agent_dir_idx = None
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class StandardMaze3(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 0 1 0 1 0 0 0 0 0 0",
"0 1 1 1 1 0 1 0 1 1 1 1 0",
"0 1 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 1 1 1 1 0 1 0 1 0 1",
"1 1 0 1 0 0 0 0 1 0 1 0 0",
"0 0 0 1 0 1 1 0 1 0 1 1 0",
"0 1 0 1 0 1 0 0 1 0 0 1 0",
"0 1 0 1 0 1 0 1 1 1 0 1 1",
"0 1 0 0 0 1 0 1 0 1 0 0 0",
"0 1 1 1 0 1 0 1 0 1 1 1 0",
"0 1 0 0 0 1 0 1 0 0 0 1 0",
"0 1 0 1 1 1 0 1 0 1 0 1 0",
"0 1 0 0 0 1 0 0 0 1 0 0 0"
]
goal_pos = (12,6)
agent_pos = (3,0)
agent_dir_idx = None
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class SmallCorridor(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 0 0 0 0 0 0 0 0 0 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 1 1 1 1 1 1 1 1 1 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 0 0 0 0 0 0 0 0 0 0 0 0"
]
goal_pos = [
(2,5),(4,5),(6,5),(8,5),(10,5),
(2,7),(4,7),(6,7),(8,7),(10,7),
]
agent_pos = (0,6)
agent_dir_idx = None
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class LargeCorridor(MazeSingleton):
def __init__(
self,
see_agent=False,
normalize_obs=False):
wall_map = [
"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0",
"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
]
goal_pos = [
(2,8),(4,8),(6,8),(8,8),(10,8),(12,8),(14,8),(16,8),
(2,10),(4,10),(6,10),(8,10),(10,10),(12,10),(14,10),(16,10)
]
agent_pos = (0,9)
agent_dir_idx = None
super().__init__(
wall_map=wall_map,
goal_pos=goal_pos,
agent_pos=agent_pos,
agent_dir_idx=agent_dir_idx,
see_agent=see_agent,
normalize_obs=normalize_obs
)
class FourRooms(Maze):
def __init__(
self,
height=17,
width=17,
agent_view_size=5,
see_through_walls=True,
see_agent=False,
normalize_obs=False,
max_episode_steps=250,
singleton_seed=-1):
super().__init__(
height=height,
width=width,
agent_view_size=agent_view_size,
see_through_walls=see_through_walls,
see_agent=see_agent,
normalize_obs=normalize_obs,
max_episode_steps=max_episode_steps,
singleton_seed=singleton_seed
)
assert height % 2 == 1 and width % 2 == 1, \
'Grid height and width must be odd'
wall_map = jnp.zeros((height, width), dtype=jnp.bool_)
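		# A central row and central column of walls split the grid into four rooms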
wall_map = wall_map.at[height//2, :].set(True)
wall_map = wall_map.at[:, width//2].set(True)
self.wall_map = wall_map
self.room_h = height//2
self.room_w = width//2
self.all_pos_idxs = jnp.arange(height*width)
self.goal_pos_mask = (~wall_map).flatten()
self.agent_pos_mask = self.goal_pos_mask
def reset_env(
self,
key: chex.PRNGKey
) -> Tuple[chex.Array, EnvState]:
# Randomize door positions
params = self.params
key, x_rng, y_rng = jax.random.split(key,3)
x_door_idxs = jax.random.randint(x_rng, (2,), 0, self.room_w) \
+ jnp.array([0, self.room_w+1], dtype=jnp.uint32)
y_door_idxs = jax.random.randint(y_rng, (2,), 0, self.room_h) \
+ jnp.array([0, self.room_h+1], dtype=jnp.uint32)
wall_map = self.wall_map.at[self.room_h, x_door_idxs].set(False)
wall_map = wall_map.at[y_door_idxs,self.room_w].set(False)
# Randomize goal pos
key, subkey = jax.random.split(key)
goal_pos_idx = jax.random.choice(subkey, self.all_pos_idxs, shape=(), p=self.goal_pos_mask)
goal_pos = jnp.array([goal_pos_idx%params.width, goal_pos_idx//params.width], dtype=jnp.uint32)
# Randomize agent pos
key, subkey = jax.random.split(key)
agent_pos_mask = self.agent_pos_mask.at[goal_pos_idx].set(False)
agent_pos_idx = jax.random.choice(subkey, self.all_pos_idxs, shape=(), p=self.agent_pos_mask)
agent_pos = jnp.array([agent_pos_idx%params.width, agent_pos_idx//params.width], dtype=jnp.uint32)
key, subkey = jax.random.split(key)
agent_dir_idx = jax.random.choice(subkey, 4)
maze_map = make_maze_map(
self.params,
wall_map,
goal_pos,
agent_pos,
agent_dir_idx,
pad_obs=True)
state = EnvState(
agent_pos=agent_pos,
agent_dir=DIR_TO_VEC[agent_dir_idx],
agent_dir_idx=agent_dir_idx,
goal_pos=goal_pos,
wall_map=wall_map,
maze_map=maze_map,
time=0,
terminal=False,
)
return self.get_obs(state), state
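# Editor's note: illustrative sketch, not part of the original file. The
# reset_env above samples goal/agent cells by drawing a flat index with a
# probability mask over free tiles and decoding it as (x, y) = (idx % W, idx // W).
# The same trick in isolation:
def _masked_cell_sample_example(key, free_mask_2d):
    import jax
    import jax.numpy as jnp
    h, w = free_mask_2d.shape
    idx = jax.random.choice(key, jnp.arange(h * w),
                            p=free_mask_2d.flatten().astype(jnp.float32))
    return jnp.array([idx % w, idx // w], dtype=jnp.uint32)  # (x, y)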
class Crossing(Maze):
def __init__(
self,
height=9,
width=9,
n_crossings=5,
agent_view_size=5,
see_through_walls=True,
see_agent=False,
normalize_obs=False,
max_episode_steps=250,
singleton_seed=-1):
self.n_crossings = n_crossings
max_episode_steps = 4*(height+2)*(width+2)
super().__init__(
height=height,
width=width,
agent_view_size=agent_view_size,
see_through_walls=see_through_walls,
see_agent=see_agent,
normalize_obs=normalize_obs,
max_episode_steps=max_episode_steps,
singleton_seed=singleton_seed
)
def reset_env(
self,
key: chex.PRNGKey
) -> Tuple[chex.Array, EnvState]:
params = self.params
height, width = params.height, params.width
goal_pos = jnp.array([width-1, height-1])
agent_pos = jnp.array([0,0], dtype=jnp.uint32)
agent_dir_idx = 0
# Generate walls
wall_map = jnp.zeros((height, width), dtype=jnp.bool_)
row_y_choices = jnp.arange(1,height-1,2)
col_x_choices = jnp.arange(1,width-1,2)
rng, subrng = jax.random.split(key)
dirs = jax.random.permutation(
subrng,
jnp.concatenate(
(jnp.zeros(len(row_y_choices)),
jnp.ones(len(col_x_choices)))
)
)[:self.n_crossings]
n_v = sum(dirs.astype(jnp.uint32))
n_h = len(dirs) - n_v
rng, row_rng, col_rng = jax.random.split(rng, 3)
row_ys_mask = jax.random.permutation(row_rng, (jnp.arange(len(row_y_choices)) < n_v).repeat(2))
if height % 2 == 0:
row_ys_mask = jnp.concatenate((row_ys_mask, jnp.zeros(2)))
else:
row_ys_mask = jnp.concatenate((row_ys_mask, jnp.zeros(1)))
row_ys_mask = jnp.logical_and(
jnp.zeros(height, dtype=jnp.bool_).at[row_y_choices].set(True),
row_ys_mask
)
col_xs_mask = jax.random.permutation(col_rng, (jnp.arange(len(col_x_choices)) < n_h).repeat(2))
if width % 2 == 0:
col_xs_mask = jnp.concatenate((col_xs_mask, jnp.zeros(2)))
else:
col_xs_mask = jnp.concatenate((col_xs_mask, jnp.zeros(1)))
col_xs_mask = jnp.logical_and(
jnp.zeros(width, dtype=jnp.bool_).at[col_x_choices].set(True),
col_xs_mask
)
wall_map = jnp.logical_or(
wall_map,
jnp.tile(jnp.expand_dims(row_ys_mask,-1), (1,width))
)
wall_map = jnp.logical_or(
wall_map,
jnp.tile(jnp.expand_dims(col_xs_mask,0), (height,1))
)
# Generate wall openings
def _scan_step(carry, rng):
wall_map, pos, passed_wall, last_dir, last_dir_idx = carry
dir_idx = jax.random.randint(rng,(),0,2)
go_dir = (~passed_wall)*DIR_TO_VEC[dir_idx] + passed_wall*last_dir
next_pos = pos + go_dir
# If the next pos would cross the right/bottom border, flip the move direction (right <-> down)
collide = jnp.logical_or(
(next_pos[0] >= width),
(next_pos[1] >= height)
)
go_dir = collide*DIR_TO_VEC[(dir_idx+1)%2] + (~collide)*go_dir
dir_idx = (dir_idx+1)%2 + (~collide)*dir_idx
next_pos = collide*(pos + go_dir) + (~collide)*next_pos
last_dir = go_dir
last_dir_idx = dir_idx
pos = next_pos
passed_wall = wall_map[pos[1],pos[0]]
wall_map = wall_map.at[pos[1], pos[0]].set(False)
return (wall_map, pos.astype(jnp.uint32), passed_wall, last_dir, last_dir_idx), None
n_steps_to_goal = width + height - 2
rng, *subrngs = jax.random.split(rng, n_steps_to_goal+1)
pos = agent_pos
passed_wall = jnp.array(False)
last_dir = DIR_TO_VEC[0]
(wall_map, pos, passed_wall, last_dir, last_dir_idx), _ = jax.lax.scan(
_scan_step,
(wall_map, pos, passed_wall, last_dir, 0),
jnp.array(subrngs),
length=n_steps_to_goal
)
maze_map = make_maze_map(
self.params,
wall_map,
goal_pos,
agent_pos,
agent_dir_idx,
pad_obs=True)
state = EnvState(
agent_pos=agent_pos,
agent_dir=DIR_TO_VEC[agent_dir_idx],
agent_dir_idx=agent_dir_idx,
goal_pos=goal_pos,
wall_map=wall_map,
maze_map=maze_map,
time=0,
terminal=False,
)
return self.get_obs(state), state
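# Editor's note: illustrative sketch, not part of the original file. Crossing
# (and PerfectMaze below) carve their openings inside jax.lax.scan: the map
# travels in the carry and one PRNG key is consumed per step. The bare pattern:
def _scan_pattern_example(key, n_steps=4):
    import jax
    import jax.numpy as jnp

    def step(carry, step_key):
        counter = carry
        bump = jax.random.randint(step_key, (), 0, 2, dtype=jnp.int32)
        return counter + bump, None   # (new carry, no per-step output)

    keys = jax.random.split(key, n_steps)
    final_counter, _ = jax.lax.scan(step, jnp.zeros((), dtype=jnp.int32), keys)
    return final_counter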
NEIGHBOR_WALL_OFFSETS = jnp.array([
[1,0], # right
[0,1], # bottom
[-1,0], # left
[0,-1], # top
[0,0] # self
], dtype=jnp.int32)
class PerfectMaze(Maze):
def __init__(
self,
height=13,
width=13,
agent_view_size=5,
see_through_walls=True,
see_agent=False,
normalize_obs=False,
max_episode_steps=250,
singleton_seed=-1):
assert height % 2 == 1 and width % 2 == 1, \
'Maze dimensions must be odd.'
max_episode_steps = 2*(width+2)*(height+2)
super().__init__(
height=height,
width=width,
agent_view_size=agent_view_size,
see_through_walls=see_through_walls,
see_agent=see_agent,
normalize_obs=normalize_obs,
max_episode_steps=max_episode_steps,
singleton_seed=singleton_seed
)
def reset_env(
self,
key: chex.PRNGKey
) -> Tuple[chex.Array, EnvState]:
"""
Generate a perfect maze using an iterative search procedure.
"""
params = self.params
height, width = self.params.height, self.params.width
n_tiles = height*width
# Track maze wall map
wall_map = jnp.ones((height, width), dtype=jnp.bool_)
# Track visited, walkable tiles
_h = height//2+1
_w = width//2+1
visited_map = jnp.zeros((_h, _w), dtype=jnp.bool_)
vstack = jnp.zeros((_h*_w, 2), dtype=jnp.uint32)
vstack_size = 0
# Get initial start tile in walkable index
key, subkey = jax.random.split(key)
start_pos_x = jax.random.randint(subkey, (), 0, _w)
start_pos_y = jax.random.randint(subkey, (), 0, _h)
start_pos = jnp.array([start_pos_x,start_pos_y], dtype=jnp.uint32)
# Set initial start tile as visited
visited_map = visited_map.at[
start_pos[1],start_pos[0]
].set(True)
wall_map = wall_map.at[
2*start_pos[1],2*start_pos[0]
].set(False)
vstack = vstack.at[vstack_size:vstack_size+2].set(start_pos)
vstack_size += 2
def _scan_step(carry, key):
# Choose last visited tile and move to a neighbor
wall_map, visited_map, vstack, vstack_size = carry
abs_pos = 2*vstack[vstack_size-1]
neighbor_wall_offsets = NEIGHBOR_WALL_OFFSETS.at[-1].set(
vstack[vstack_size-2] - vstack[vstack_size-1]
)
# Find a random unvisited neighbor
neighbor_pos = \
jnp.minimum(
jnp.maximum(
jnp.tile(abs_pos, (len(NEIGHBOR_WALL_OFFSETS),1)) \
+ 2*neighbor_wall_offsets, 0
),
jnp.array([width, height], dtype=jnp.uint32)
)
# Check for unvisited neighbors. Set self to unvisited if all visited.
neighbor_visited = visited_map.at[
neighbor_pos[:,1]//2, neighbor_pos[:,0]//2
].get()
n_neighbor_visited = neighbor_visited[:4].sum()
all_visited = n_neighbor_visited == 4
all_visited_post = n_neighbor_visited >= 3
neighbor_visited = neighbor_visited.at[-1].set(~all_visited)
# Choose a random unvisited neighbor and remove walls between current tile
# and this neighbor and at this neighbor.
rand_neighbor_idx = jax.random.choice(
key, jnp.arange(len(NEIGHBOR_WALL_OFFSETS)), p=~neighbor_visited)
rand_neighbor_pos = neighbor_pos[rand_neighbor_idx]
rand_neighbor_wall_pos = abs_pos + (~all_visited)*neighbor_wall_offsets[rand_neighbor_idx]
remove_wall_pos = jnp.concatenate(
(jnp.expand_dims(rand_neighbor_pos, 0),
jnp.expand_dims(rand_neighbor_wall_pos,0)), 0)
wall_map = wall_map.at[
remove_wall_pos[:,1], remove_wall_pos[:,0]
].set(False)
# Set selected neighbor as visited
visited_map = visited_map.at[
rand_neighbor_pos[1]//2,rand_neighbor_pos[0]//2
].set(True)
# Pop current tile from stack if all neighbors have been visited
vstack_size -= all_visited_post
# Push selected neighbor onto stack
vstack = vstack.at[vstack_size].set(
rand_neighbor_pos//2
)
vstack_size += ~all_visited
return (wall_map, visited_map, vstack, vstack_size), None
# for i in range(3*_h*_w):
max_n_steps = 2*_w*_h
key, *subkeys = jax.random.split(key, max_n_steps+1)
(wall_map, visited_map, vstack, vstack_size), _ = jax.lax.scan(
_scan_step,
(wall_map, visited_map, vstack, vstack_size),
jnp.array(subkeys),
length=max_n_steps
)
# Randomize goal position
all_pos_idx = jnp.arange(height*width)
key, subkey = jax.random.split(key)
goal_mask = ~wall_map.flatten()
goal_pos_idx = jax.random.choice(subkey, all_pos_idx, p=goal_mask)
goal_pos = jnp.array([goal_pos_idx%width, goal_pos_idx//width])
# Randomize agent position
key, subkey = jax.random.split(key)
agent_mask = goal_mask.at[goal_pos_idx].set(False)
agent_pos_idx = jax.random.choice(subkey, all_pos_idx, p=agent_mask)
agent_pos = jnp.array([agent_pos_idx%width, agent_pos_idx//width], dtype=jnp.uint32)
# Randomize agent dir
key, subkey = jax.random.split(key)
agent_dir_idx = jax.random.choice(subkey, 4)
maze_map = make_maze_map(
self.params,
wall_map,
goal_pos,
agent_pos,
agent_dir_idx,
pad_obs=True)
state = EnvState(
agent_pos=agent_pos,
agent_dir=DIR_TO_VEC[agent_dir_idx],
agent_dir_idx=agent_dir_idx,
goal_pos=goal_pos,
wall_map=wall_map,
maze_map=maze_map,
time=0,
terminal=False,
)
return self.get_obs(state), state
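# Editor's note: illustrative reference, not part of the original file. The
# scan above appears to be a fixed-step, jit-friendly form of the classic
# depth-first "recursive backtracker": walkable cells sit on every other tile,
# and stepping to an unvisited neighbour clears the wall tile between them.
# The same idea in plain NumPy, for readability only:
def _backtracker_reference(height, width, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    wall = np.ones((height, width), dtype=bool)
    cells = [(y, x) for y in range(0, height, 2) for x in range(0, width, 2)]
    start = cells[rng.integers(len(cells))]
    wall[start] = False
    stack, visited = [start], {start}
    while stack:
        y, x = stack[-1]
        nbrs = [(y + dy, x + dx)
                for dy, dx in ((2, 0), (-2, 0), (0, 2), (0, -2))
                if 0 <= y + dy < height and 0 <= x + dx < width
                and (y + dy, x + dx) not in visited]
        if not nbrs:
            stack.pop()      # dead end: backtrack
            continue
        ny, nx = nbrs[rng.integers(len(nbrs))]
        wall[(y + ny) // 2, (x + nx) // 2] = False   # clear the wall between cells
        wall[ny, nx] = False
        visited.add((ny, nx))
        stack.append((ny, nx))
    return wall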
class PerfectMazeMedium(PerfectMaze):
def __init__(self, *args, **kwargs):
super().__init__(height=19, width=19, *args, **kwargs)
class PerfectMazeExtraLarge(PerfectMaze):
def __init__(self, *args, **kwargs):
super().__init__(height=101, width=101, *args, **kwargs)
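# Editor's note: illustrative usage sketch, not part of the original file.
# These fixed-size subclasses pin height/width by keyword while forwarding
# *args, so the remaining options are expected to be passed by keyword
# (positional values would collide with the pinned height argument):
def _make_medium_perfect_maze_example():
    return PerfectMazeMedium(see_agent=True, normalize_obs=False)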
class Memory(MazeSingleton):
def __init__(
self,
height=17,
width=17,
agent_view_size=7,
see_through_walls=True,
see_agent=False,
normalize_obs=False,
obs_agent_pos=False,
max_episode_steps=250,
singleton_seed=-1):
# Generate walls
wall_map = [
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"1 1 1 1 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 1 1 1 1 1 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
"0 0 0 1 1 1 1 1 1 0 1 0 0 0 0",
"1 1 1 1 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0",
"0 0 0 0 0 0 0 0 1 0 1 0 0 0 0"
]
super().__init__(
wall_map=wall_map,
goal_pos=(9,5),
agent_pos=(0,7),
agent_dir_idx=0,
see_agent=see_agent,
normalize_obs=normalize_obs,
obs_agent_pos=obs_agent_pos,
max_episode_steps=max_episode_steps
)
self.top_pos = jnp.array([9,5], dtype=jnp.uint32)
self.bottom_pos = jnp.array([9,9], dtype=jnp.uint32)
def reset_env(
self,
key: chex.PRNGKey,
) -> Tuple[chex.Array, EnvState]:
params = self.params
height, width = params.height, params.width
agent_pos = jnp.array([0,7], dtype=jnp.uint32)
agent_dir_idx = 0
# Randomly generate a memory location
is_top_goal = jax.random.randint(key, minval=0, maxval=2, shape=(1,), dtype=jnp.uint8)
clue_pos = jnp.array((0,6), dtype=jnp.uint32)
self.goal_pos = is_top_goal*self.top_pos + (1-is_top_goal)*self.bottom_pos
self.distractor_pos = is_top_goal*self.bottom_pos + (1-is_top_goal)*self.top_pos
| goal_color = is_top_goal*COLOR_TO_INDEX['red'] + (1-is_top_goal)*COLOR_TO_INDEX['green'] | 2 | 2023-10-28 12:12:01+00:00 | 12k |
innnky/ar-vits | s2_train.py | [
{
"identifier": "commons",
"path": "module/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef convert_pad_shape(pad_shape):\ndef intersperse(lst, item):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(\n length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape):\ndef shift_1d(x):\ndef sequence_mask(length, max_length=None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):\ndef squeeze(x, x_mask=None, n_sqz=2):\ndef unsqueeze(x, x_mask=None, n_sqz=2):"
},
{
"identifier": "TextAudioSpeakerLoader",
"path": "module/data_utils.py",
"snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams, get_path=False, meta=None, val=False, phoneme_path='dump/phoneme.npy'):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.val = val\n\n self.get_path = get_path\n self.meta = meta\n self.phoneme_data = np.load(phoneme_path, allow_pickle=True).item()\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n if self.get_path:\n total_process, current_process = self.meta\n audiopaths_sid_text_new = []\n for idx, item in enumerate(self.audiopaths_sid_text):\n if idx % total_process == current_process:\n audiopaths_sid_text_new.append(item)\n self.audiopaths_sid_text = audiopaths_sid_text_new\n\n print(\"phoneme_data_len:\", len(self.phoneme_data.keys()))\n print(\"wav_data_len:\", len(self.audiopaths_sid_text))\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n for item in tqdm(self.audiopaths_sid_text):\n audiopath = item[0]\n try:\n phoneme = self.phoneme_data[audiopath]\n phoneme = phoneme.split(' ')\n phoneme_ids = cleaned_text_to_sequence(phoneme)\n except Exception:\n print(f\"{audiopath} not in self.phoneme_data !\")\n skipped += 1\n continue\n\n sslpath = audiopath.replace('.wav', '.ssl.pt')\n if os.path.exists(audiopath) and os.path.exists(sslpath) and (os.path.getsize(audiopath) / self.sampling_rate /2 > 0.6 or self.val):\n audiopaths_sid_text_new.append([audiopath, phoneme_ids])\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n print(\"skipped: \", skipped, \", total: \", len(self.audiopaths_sid_text))\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, phoneme_ids = audiopath_sid_text\n text = torch.FloatTensor(phoneme_ids)\n # bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language,audiopath)\n try:\n spec, wav = self.get_audio(audiopath)\n except:\n spec = torch.zeros(1025, 100)\n wav = torch.zeros(1, 100*self.hop_length)\n print(\"load audio error!!!!!!\", audiopath)\n ssl = torch.load(audiopath.replace(\".wav\", \".ssl.pt\")).float()\n ssl = F.interpolate(ssl, size=spec.shape[-1], mode=\"nearest\")\n if self.get_path:\n return (ssl, spec, wav, audiopath)\n return (ssl, spec, wav, text)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n audio_norm = audio\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n spec = 
spectrogram_torch(audio_norm, self.filter_length,\n self.sampling_rate, self.hop_length, self.win_length,\n center=False)\n spec = torch.squeeze(spec, 0)\n # torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)\n\n def random_slice(self, ssl, wav, mel):\n assert abs(ssl.shape[-1]- wav.shape[-1]//self.hop_length) < 3, (\"first\", ssl.shape, wav.shape)\n\n len_mel = mel.shape[1]\n if self.val:\n reference_mel = mel[:, :len_mel//3]\n return reference_mel, ssl, wav, mel\n dir = random.randint(0, 1)\n sep_point = random.randint(int(len_mel//3), int(len_mel//3*2))\n\n if dir == 0:\n reference_mel = mel[:, :sep_point]\n ssl = ssl[:, :, sep_point:]\n wav2 = wav[:, sep_point*self.hop_length:]\n mel = mel[:, sep_point:]\n else:\n reference_mel = mel[:, sep_point:]\n ssl = ssl[:, :, :sep_point]\n wav2 = wav[:, :sep_point*self.hop_length]\n mel = mel[:, :sep_point]\n\n assert abs(ssl.shape[-1]- wav2.shape[-1]//self.hop_length) < 3, (ssl.shape, wav.shape,wav2.shape, mel.shape, sep_point,self.hop_length, sep_point*self.hop_length, dir)\n return reference_mel, ssl, wav2, mel"
},
{
"identifier": "TextAudioSpeakerCollate",
"path": "module/data_utils.py",
"snippet": "class TextAudioSpeakerCollate():\n \"\"\" Zero-pads model inputs and targets\n \"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]),\n dim=0, descending=True)\n\n max_ssl_len = max([x[0].size(2) for x in batch])\n max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))\n max_spec_len = max([x[1].size(1) for x in batch])\n max_spec_len = int(2 * ((max_spec_len // 2) + 1))\n max_wav_len = max([x[2].size(1) for x in batch])\n max_text_len = max([x[3].size(0) for x in batch])\n\n ssl_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n text_lengths = torch.LongTensor(len(batch))\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n ssl_padded = torch.FloatTensor(len(batch), batch[0][0].size(1), max_ssl_len)\n text_padded = torch.LongTensor(len(batch), max_text_len)\n\n spec_padded.zero_()\n wav_padded.zero_()\n ssl_padded.zero_()\n text_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n ssl = row[0]\n ssl_padded[i, :, :ssl.size(2)] = ssl[0, :, :]\n ssl_lengths[i] = ssl.size(2)\n\n spec = row[1]\n spec_padded[i, :, :spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, :wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n text = row[3]\n text_padded[i, :text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n\n return ssl_padded, ssl_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, text_padded, text_lengths"
},
{
"identifier": "DistributedBucketSampler",
"path": "module/data_utils.py",
"snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]\n\n # subsample\n ids_bucket = ids_bucket[self.rank::self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size"
},
{
"identifier": "SynthesizerTrn",
"path": "module/models.py",
"snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=0,\n gin_channels=0,\n use_sdp=True,\n semantic_frame_rate=None,\n freeze_quantizer=None,\n **kwargs):\n\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n\n self.use_sdp = use_sdp\n self.enc_p = TextEncoder(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout)\n self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,\n upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)\n self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,\n gin_channels=gin_channels)\n self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)\n\n self.ref_enc = modules.MelStyleEncoder(spec_channels, style_vector_dim=gin_channels)\n\n ssl_dim = 768\n assert semantic_frame_rate in ['25hz', \"50hz\"]\n self.semantic_frame_rate = semantic_frame_rate\n if semantic_frame_rate == '25hz':\n self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 2, stride=2)\n else:\n self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 1, stride=1)\n\n self.quantizer = ResidualVectorQuantizer(\n dimension=ssl_dim,\n n_q=1,\n bins=1024\n )\n if freeze_quantizer:\n self.ssl_proj.requires_grad_(False)\n self.quantizer.requires_grad_(False)\n\n def forward(self, ssl, y, y_lengths, text, text_lengths):\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, y.size(2)), 1).to(y.dtype)\n ge = self.ref_enc(y * y_mask, y_mask)\n\n from torch.cuda.amp import autocast\n with autocast(enabled=False):\n ssl = self.ssl_proj(ssl)\n quantized, codes, commit_loss, quantized_list = self.quantizer(ssl, layers=[0])\n\n if self.semantic_frame_rate == '25hz':\n quantized = F.interpolate(quantized, size=int(quantized.shape[-1] * 2), mode=\"nearest\")\n\n x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=ge)\n z_p = self.flow(z, y_mask, g=ge)\n\n z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)\n o = self.dec(z_slice, g=ge)\n return o, commit_loss, ids_slice, y_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), quantized\n\n def infer(self, ssl, y, y_lengths, text, text_lengths, test=None, noise_scale=0.5):\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, y.size(2)), 1).to(y.dtype)\n ge = self.ref_enc(y * y_mask, y_mask)\n\n ssl = self.ssl_proj(ssl)\n quantized, codes, commit_loss, _ = self.quantizer(ssl, layers=[0])\n if 
self.semantic_frame_rate == '25hz':\n quantized = F.interpolate(quantized, size=int(quantized.shape[-1] * 2), mode=\"nearest\")\n\n x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge, test=test)\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n\n z = self.flow(z_p, y_mask, g=ge, reverse=True)\n\n o = self.dec((z * y_mask)[:, :, :], g=ge)\n return o,y_mask, (z, z_p, m_p, logs_p)\n\n\n @torch.no_grad()\n def decode(self, codes,text, refer, noise_scale=0.5):\n refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)\n refer_mask = torch.unsqueeze(commons.sequence_mask(refer_lengths, refer.size(2)), 1).to(refer.dtype)\n ge = self.ref_enc(refer * refer_mask, refer_mask)\n\n y_lengths = torch.LongTensor([codes.size(2)*2]).to(codes.device)\n text_lengths = torch.LongTensor([text.size(-1)]).to(text.device)\n\n quantized = self.quantizer.decode(codes)\n if self.semantic_frame_rate == '25hz':\n quantized = F.interpolate(quantized, size=int(quantized.shape[-1] * 2), mode=\"nearest\")\n\n x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge)\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n\n z = self.flow(z_p, y_mask, g=ge, reverse=True)\n\n o = self.dec((z * y_mask)[:, :, :], g=ge)\n return o\n\n def extract_latent(self, x):\n ssl = self.ssl_proj(x)\n quantized, codes, commit_loss, quantized_list = self.quantizer(ssl)\n return codes.transpose(0,1)"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "module/models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "generator_loss",
"path": "module/losses.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "discriminator_loss",
"path": "module/losses.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "feature_loss",
"path": "module/losses.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2 "
},
{
"identifier": "kl_loss",
"path": "module/losses.py",
"snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l"
},
{
"identifier": "mel_spectrogram_torch",
"path": "module/mel_processing.py",
"snippet": "def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + '_' + str(y.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n wnsize_dtype_device = str(win_size) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "spec_to_mel_torch",
"path": "module/mel_processing.py",
"snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + '_' + str(spec.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec"
}
] | import os
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
import logging
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from module import commons
from module.data_utils import (
TextAudioSpeakerLoader,
TextAudioSpeakerCollate,
DistributedBucketSampler
)
from module.models import (
SynthesizerTrn,
MultiPeriodDiscriminator,
)
from module.losses import (
generator_loss,
discriminator_loss,
feature_loss,
kl_loss
)
from module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch | 8,211 | global global_step
if rank == 0:
logger = utils.get_logger(hps.s2_ckpt_dir)
logger.info(hps)
utils.check_git_hash(hps.s2_ckpt_dir)
writer = SummaryWriter(log_dir=hps.s2_ckpt_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))
dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus,
rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 400, 500, 600, 700, 800, 900, 1000],
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextAudioSpeakerCollate()
train_loader = DataLoader(train_dataset, num_workers=6, shuffle=False, pin_memory=True,
collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True)
if rank == 0:
eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True)
eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
batch_size=1, pin_memory=True,
drop_last=False, collate_fn=collate_fn)
net_g = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model).cuda(rank)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
for name, param in net_g.named_parameters():
if not param.requires_grad:
print(name,"not requires_grad")
optim_g = torch.optim.AdamW(
filter(lambda p: p.requires_grad, net_g.parameters()),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
optim_d = torch.optim.AdamW(
net_d.parameters(),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
net_g = DDP(net_g, device_ids=[rank])
net_d = DDP(net_d, device_ids=[rank])
pretrain_dir = hps.pretrain
if pretrain_dir is None:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "G_*.pth"), net_g,
optim_g, False)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "D_*.pth"), net_d,
optim_d, False)
epoch_str = max(epoch_str, 1)
global_step = (epoch_str - 1) * len(train_loader)
else:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
optim_g, True)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
optim_d, True)
epoch_str = 1
global_step = 0
if hps.resume_step != None:
global_step = hps.resume_step
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
scaler = GradScaler(enabled=hps.train.fp16_run)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank == 0:
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
[train_loader, eval_loader], logger, [writer, writer_eval])
else:
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
[train_loader, None], None, None)
scheduler_g.step()
scheduler_d.step()
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
net_g, net_d = nets
optim_g, optim_d = optims
scheduler_g, scheduler_d = schedulers
train_loader, eval_loader = loaders
if writers is not None:
writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch)
global global_step
net_g.train()
net_d.train()
for batch_idx, (ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths) in tqdm(enumerate(train_loader)):
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
ssl = ssl.cuda(rank, non_blocking=True)
ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda(rank, non_blocking=True)
with autocast(enabled=hps.train.fp16_run):
y_hat, kl_ssl, ids_slice, x_mask, z_mask, \
(z, z_p, m_p, logs_p, m_q, logs_q), stats_ssl = net_g(ssl, spec, spec_lengths, text, text_lengths)
mel = spec_to_mel_torch(
spec,
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.mel_fmin,
hps.data.mel_fmax)
| logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("h5py").setLevel(logging.INFO)
logging.getLogger("numba").setLevel(logging.INFO)
torch.backends.cudnn.benchmark = True
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '8000'
hps = utils.get_hparams(stage=2)
mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
def run(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.s2_ckpt_dir)
logger.info(hps)
utils.check_git_hash(hps.s2_ckpt_dir)
writer = SummaryWriter(log_dir=hps.s2_ckpt_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))
dist.init_process_group(backend='gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus,
rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
train_dataset,
hps.train.batch_size,
[32, 300, 400, 500, 600, 700, 800, 900, 1000],
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextAudioSpeakerCollate()
train_loader = DataLoader(train_dataset, num_workers=6, shuffle=False, pin_memory=True,
collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True)
if rank == 0:
eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True)
eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
batch_size=1, pin_memory=True,
drop_last=False, collate_fn=collate_fn)
net_g = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model).cuda(rank)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
for name, param in net_g.named_parameters():
if not param.requires_grad:
print(name,"not requires_grad")
optim_g = torch.optim.AdamW(
filter(lambda p: p.requires_grad, net_g.parameters()),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
optim_d = torch.optim.AdamW(
net_d.parameters(),
hps.train.learning_rate,
betas=hps.train.betas,
eps=hps.train.eps)
net_g = DDP(net_g, device_ids=[rank])
net_d = DDP(net_d, device_ids=[rank])
pretrain_dir = hps.pretrain
if pretrain_dir is None:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "G_*.pth"), net_g,
optim_g, False)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "D_*.pth"), net_d,
optim_d, False)
epoch_str = max(epoch_str, 1)
global_step = (epoch_str - 1) * len(train_loader)
else:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
optim_g, True)
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
optim_d, True)
epoch_str = 1
global_step = 0
if hps.resume_step != None:
global_step = hps.resume_step
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
scaler = GradScaler(enabled=hps.train.fp16_run)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank == 0:
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
[train_loader, eval_loader], logger, [writer, writer_eval])
else:
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
[train_loader, None], None, None)
scheduler_g.step()
scheduler_d.step()
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
net_g, net_d = nets
optim_g, optim_d = optims
scheduler_g, scheduler_d = schedulers
train_loader, eval_loader = loaders
if writers is not None:
writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch)
global global_step
net_g.train()
net_d.train()
for batch_idx, (ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths) in tqdm(enumerate(train_loader)):
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
ssl = ssl.cuda(rank, non_blocking=True)
ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda(rank, non_blocking=True)
with autocast(enabled=hps.train.fp16_run):
y_hat, kl_ssl, ids_slice, x_mask, z_mask, \
(z, z_p, m_p, logs_p, m_q, logs_q), stats_ssl = net_g(ssl, spec, spec_lengths, text, text_lengths)
mel = spec_to_mel_torch(
spec,
hps.data.filter_length,
hps.data.n_mel_channels,
hps.data.sampling_rate,
hps.data.mel_fmin,
hps.data.mel_fmax) | y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) | 0 | 2023-10-30 04:40:19+00:00 | 12k |
nv-tlabs/vid2player3d | vid2player/utils/torch_transform.py | [
{
"identifier": "quaternion_to_angle_axis",
"path": "vid2player/utils/konia_transform.py",
"snippet": "@torch.jit.script\ndef quaternion_to_angle_axis(\n quaternion: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n \"\"\"Convert quaternion vector to angle axis of rotation.\n\n The quaternion should be in (x, y, z, w) or (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n quaternion: tensor with quaternions.\n order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n tensor with angle axis of rotation.\n\n Shape:\n - Input: :math:`(*, 4)` where `*` means, any number of dimensions\n - Output: :math:`(*, 3)`\n\n Example:\n >>> quaternion = torch.rand(2, 4) # Nx4\n >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3\n \"\"\"\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(f\"Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n # unpack input and compute conversion\n q1: torch.Tensor = torch.tensor([])\n q2: torch.Tensor = torch.tensor([])\n q3: torch.Tensor = torch.tensor([])\n cos_theta: torch.Tensor = torch.tensor([])\n\n if order == QuaternionCoeffOrder.XYZW:\n q1 = quaternion[..., 0]\n q2 = quaternion[..., 1]\n q3 = quaternion[..., 2]\n cos_theta = quaternion[..., 3]\n else:\n cos_theta = quaternion[..., 0]\n q1 = quaternion[..., 1]\n q2 = quaternion[..., 2]\n q3 = quaternion[..., 3]\n\n sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3\n\n sin_theta: torch.Tensor = torch.sqrt((sin_squared_theta).clamp_min(eps))\n two_theta: torch.Tensor = 2.0 * torch.where(\n cos_theta < 0.0, torch_safe_atan2(-sin_theta, -cos_theta), torch_safe_atan2(sin_theta, cos_theta)\n )\n\n k_pos: torch.Tensor = safe_zero_division(two_theta, sin_theta, eps)\n k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)\n k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)\n\n angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]\n angle_axis[..., 0] += q1 * k\n angle_axis[..., 1] += q2 * k\n angle_axis[..., 2] += q3 * k\n return angle_axis"
},
{
"identifier": "angle_axis_to_quaternion",
"path": "vid2player/utils/konia_transform.py",
"snippet": "@torch.jit.script\ndef angle_axis_to_quaternion(\n angle_axis: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n r\"\"\"Convert an angle axis to a quaternion.\n\n The quaternion vector has components in (x, y, z, w) or (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n angle_axis: tensor with angle axis.\n order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n tensor with quaternion.\n\n Shape:\n - Input: :math:`(*, 3)` where `*` means, any number of dimensions\n - Output: :math:`(*, 4)`\n\n Example:\n >>> angle_axis = torch.rand(2, 3) # Nx3\n >>> quaternion = angle_axis_to_quaternion(angle_axis, order=QuaternionCoeffOrder.WXYZ) # Nx4\n \"\"\"\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(f\"Input must be a tensor of shape Nx3 or 3. Got {angle_axis.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # unpack input and compute conversion\n a0: torch.Tensor = angle_axis[..., 0:1]\n a1: torch.Tensor = angle_axis[..., 1:2]\n a2: torch.Tensor = angle_axis[..., 2:3]\n theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2\n\n theta: torch.Tensor = torch.sqrt((theta_squared).clamp_min(eps))\n half_theta: torch.Tensor = theta * 0.5\n\n mask: torch.Tensor = theta_squared > 0.0\n ones: torch.Tensor = torch.ones_like(half_theta)\n\n k_neg: torch.Tensor = 0.5 * ones\n k_pos: torch.Tensor = safe_zero_division(torch.sin(half_theta), theta, eps)\n k: torch.Tensor = torch.where(mask, k_pos, k_neg)\n w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)\n\n quaternion: torch.Tensor = torch.zeros(\n size=angle_axis.shape[:-1] + (4,), dtype=angle_axis.dtype, device=angle_axis.device\n )\n if order == QuaternionCoeffOrder.XYZW:\n quaternion[..., 0:1] = a0 * k\n quaternion[..., 1:2] = a1 * k\n quaternion[..., 2:3] = a2 * k\n quaternion[..., 3:4] = w\n else:\n quaternion[..., 1:2] = a0 * k\n quaternion[..., 2:3] = a1 * k\n quaternion[..., 3:4] = a2 * k\n quaternion[..., 0:1] = w\n return quaternion"
},
{
"identifier": "quaternion_to_rotation_matrix",
"path": "vid2player/utils/konia_transform.py",
"snippet": "@torch.jit.script\ndef quaternion_to_rotation_matrix(\n quaternion: torch.Tensor, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n r\"\"\"Converts a quaternion to a rotation matrix.\n\n The quaternion should be in (x, y, z, w) or (w, x, y, z) format.\n\n Args:\n quaternion: a tensor containing a quaternion to be converted.\n The tensor can be of shape :math:`(*, 4)`.\n order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n the rotation matrix of shape :math:`(*, 3, 3)`.\n\n Example:\n >>> quaternion = torch.tensor((0., 0., 0., 1.))\n >>> quaternion_to_rotation_matrix(quaternion, order=QuaternionCoeffOrder.WXYZ)\n tensor([[-1., 0., 0.],\n [ 0., -1., 0.],\n [ 0., 0., 1.]])\n \"\"\"\n if not isinstance(quaternion, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(quaternion)}\")\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(f\"Input must be a tensor of shape (*, 4). Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # normalize the input quaternion\n quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)\n\n # unpack the normalized quaternion components\n if order == QuaternionCoeffOrder.XYZW:\n x, y, z, w = quaternion_norm[..., 0], quaternion_norm[..., 1], quaternion_norm[..., 2], quaternion_norm[..., 3]\n else:\n w, x, y, z = quaternion_norm[..., 0], quaternion_norm[..., 1], quaternion_norm[..., 2], quaternion_norm[..., 3]\n\n # compute the actual conversion\n tx: torch.Tensor = 2.0 * x\n ty: torch.Tensor = 2.0 * y\n tz: torch.Tensor = 2.0 * z\n twx: torch.Tensor = tx * w\n twy: torch.Tensor = ty * w\n twz: torch.Tensor = tz * w\n txx: torch.Tensor = tx * x\n txy: torch.Tensor = ty * x\n txz: torch.Tensor = tz * x\n tyy: torch.Tensor = ty * y\n tyz: torch.Tensor = tz * y\n tzz: torch.Tensor = tz * z\n one: torch.Tensor = torch.tensor(1.0)\n\n matrix: torch.Tensor = torch.stack(\n (\n one - (tyy + tzz),\n txy - twz,\n txz + twy,\n txy + twz,\n one - (txx + tzz),\n tyz - twx,\n txz - twy,\n tyz + twx,\n one - (txx + tyy),\n ),\n dim=-1,\n ).view(quaternion.shape[:-1] + (3, 3))\n\n # if len(quaternion.shape) == 1:\n # matrix = torch.squeeze(matrix, dim=0)\n return matrix"
},
{
"identifier": "rotation_matrix_to_quaternion",
"path": "vid2player/utils/konia_transform.py",
"snippet": "@torch.jit.script\ndef rotation_matrix_to_quaternion(\n rotation_matrix: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n r\"\"\"Convert 3x3 rotation matrix to 4d quaternion vector.\n\n The quaternion vector has components in (w, x, y, z) or (x, y, z, w) format.\n\n .. note::\n The (x, y, z, w) order is going to be deprecated in favor of efficiency.\n\n Args:\n rotation_matrix: the rotation matrix to convert.\n eps: small value to avoid zero division.\n order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n the rotation in quaternion.\n\n Shape:\n - Input: :math:`(*, 3, 3)`\n - Output: :math:`(*, 4)`\n\n Example:\n >>> input = torch.rand(4, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_quaternion(input, eps=torch.finfo(input.dtype).eps,\n ... order=QuaternionCoeffOrder.WXYZ) # Nx4\n \"\"\"\n if not isinstance(rotation_matrix, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(rotation_matrix)}\")\n\n if not rotation_matrix.shape[-2:] == (3, 3):\n raise ValueError(f\"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n m00, m01, m02 = rotation_matrix[..., 0, 0], rotation_matrix[..., 0, 1], rotation_matrix[..., 0, 2]\n m10, m11, m12 = rotation_matrix[..., 1, 0], rotation_matrix[..., 1, 1], rotation_matrix[..., 1, 2]\n m20, m21, m22 = rotation_matrix[..., 2, 0], rotation_matrix[..., 2, 1], rotation_matrix[..., 2, 2]\n\n trace: torch.Tensor = m00 + m11 + m22\n\n sq = torch.sqrt((trace + 1.0).clamp_min(eps)) * 2.0 # sq = 4 * qw.\n qw = 0.25 * sq\n qx = safe_zero_division(m21 - m12, sq)\n qy = safe_zero_division(m02 - m20, sq)\n qz = safe_zero_division(m10 - m01, sq)\n if order == QuaternionCoeffOrder.XYZW:\n trace_positive_cond = torch.stack((qx, qy, qz, qw), dim=-1)\n trace_positive_cond = torch.stack((qw, qx, qy, qz), dim=-1)\n\n sq = torch.sqrt((1.0 + m00 - m11 - m22).clamp_min(eps)) * 2.0 # sq = 4 * qx.\n qw = safe_zero_division(m21 - m12, sq)\n qx = 0.25 * sq\n qy = safe_zero_division(m01 + m10, sq)\n qz = safe_zero_division(m02 + m20, sq)\n if order == QuaternionCoeffOrder.XYZW:\n cond_1 = torch.stack((qx, qy, qz, qw), dim=-1)\n cond_1 = torch.stack((qw, qx, qy, qz), dim=-1)\n\n sq = torch.sqrt((1.0 + m11 - m00 - m22).clamp_min(eps)) * 2.0 # sq = 4 * qy.\n qw = safe_zero_division(m02 - m20, sq)\n qx = safe_zero_division(m01 + m10, sq)\n qy = 0.25 * sq\n qz = safe_zero_division(m12 + m21, sq)\n if order == QuaternionCoeffOrder.XYZW:\n cond_2 = torch.stack((qx, qy, qz, qw), dim=-1)\n cond_2 = torch.stack((qw, qx, qy, qz), dim=-1)\n\n sq = torch.sqrt((1.0 + m22 - m00 - m11).clamp_min(eps)) * 2.0 # sq = 4 * qz.\n qw = safe_zero_division(m10 - m01, sq)\n qx = safe_zero_division(m02 + m20, sq)\n qy = safe_zero_division(m12 + m21, sq)\n qz = 0.25 * sq\n if order == QuaternionCoeffOrder.XYZW:\n cond_3 = torch.stack((qx, qy, qz, qw), dim=-1)\n cond_3 = torch.stack((qw, qx, qy, qz), dim=-1)\n\n where_2 = torch.where((m11 > m22).unsqueeze(-1), cond_2, cond_3)\n where_1 = torch.where(((m00 > m11) & (m00 > m22)).unsqueeze(-1), 
cond_1, where_2)\n\n quaternion: torch.Tensor = torch.where((trace > 0.0).unsqueeze(-1), trace_positive_cond, where_1)\n return quaternion"
},
{
"identifier": "rotation_matrix_to_angle_axis",
"path": "vid2player/utils/konia_transform.py",
"snippet": "@torch.jit.script\ndef rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert 3x3 rotation matrix to Rodrigues vector.\n\n Args:\n rotation_matrix: rotation matrix.\n\n Returns:\n Rodrigues vector transformation.\n\n Shape:\n - Input: :math:`(N, 3, 3)`\n - Output: :math:`(N, 3)`\n\n Example:\n >>> input = torch.rand(2, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_angle_axis(input) # Nx3\n \"\"\"\n if not isinstance(rotation_matrix, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(rotation_matrix)}\")\n\n if not rotation_matrix.shape[-2:] == (3, 3):\n raise ValueError(f\"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}\")\n quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix, order=QuaternionCoeffOrder.WXYZ)\n return quaternion_to_angle_axis(quaternion, order=QuaternionCoeffOrder.WXYZ)"
},
{
"identifier": "angle_axis_to_rotation_matrix",
"path": "vid2player/utils/konia_transform.py",
"snippet": "@torch.jit.script\ndef angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert 3d vector of axis-angle rotation to 3x3 rotation matrix.\n\n Args:\n angle_axis: tensor of 3d vector of axis-angle rotations.\n\n Returns:\n tensor of 3x3 rotation matrices.\n\n Shape:\n - Input: :math:`(N, 3)`\n - Output: :math:`(N, 3, 3)`\n\n Example:\n >>> input = torch.rand(1, 3) # Nx3\n >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3\n \"\"\"\n if not isinstance(angle_axis, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(angle_axis)))\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(\"Input size must be a (*, 3) tensor. Got {}\".format(angle_axis.shape))\n\n orig_shape = angle_axis.shape\n angle_axis = angle_axis.reshape(-1, 3)\n\n # stolen from ceres/rotation.h\n\n _angle_axis = torch.unsqueeze(angle_axis, dim=1)\n theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))\n theta2 = torch.squeeze(theta2, dim=1)\n\n # compute rotation matrices\n rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)\n rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)\n\n # create mask to handle both cases\n eps = 1e-6\n mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)\n mask_pos = (mask).type_as(theta2)\n mask_neg = (mask == torch.tensor(False)).type_as(theta2) # noqa\n\n # create output pose matrix\n batch_size = angle_axis.shape[0]\n rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)\n rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)\n # fill output matrix with masked values\n rotation_matrix[..., :3, :3] = mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor\n\n rotation_matrix = rotation_matrix.view(orig_shape[:-1] + (3, 3))\n return rotation_matrix # Nx3x3"
}
] | import numpy as np
import torch
from .konia_transform import quaternion_to_angle_axis, angle_axis_to_quaternion, quaternion_to_rotation_matrix, rotation_matrix_to_quaternion, rotation_matrix_to_angle_axis, angle_axis_to_rotation_matrix | 7,586 |
def quat_between_two_vec(v1, v2, eps: float = 1e-6):
"""
    quaternion (w, x, y, z) for rotating v1 onto v2; both inputs are expected to be unit vectors
"""
orig_shape = v1.shape
v1 = v1.reshape(-1, 3)
v2 = v2.reshape(-1, 3)
dot = (v1 * v2).sum(-1)
cross = torch.cross(v1, v2, dim=-1)
out = torch.cat([(1 + dot).unsqueeze(-1), cross], dim=-1)
# handle v1 & v2 with same direction
sind = dot > 1 - eps
out[sind] = torch.tensor([1., 0., 0., 0.], device=v1.device)
# handle v1 & v2 with opposite direction
nind = dot < -1 + eps
if torch.any(nind):
vx = torch.tensor([1., 0., 0.], device=v1.device)
vxdot = (v1 * vx).sum(-1).abs()
nxind = nind & (vxdot < 1 - eps)
if torch.any(nxind):
out[nxind] = angle_axis_to_quaternion(normalize(torch.cross(vx.expand_as(v1[nxind]), v1[nxind], dim=-1)) * np.pi)
    # handle v1 & v2 with opposite directions that are also parallel to the x axis
pind = nind & (vxdot >= 1 - eps)
if torch.any(pind):
vy = torch.tensor([0., 1., 0.], device=v1.device)
out[pind] = angle_axis_to_quaternion(normalize(torch.cross(vy.expand_as(v1[pind]), v1[pind], dim=-1)) * np.pi)
# normalize and reshape
out = normalize(out).view(orig_shape[:-1] + (4,))
return out
@torch.jit.script
def get_yaw(q, eps: float = 1e-6):
yaw_atany = 2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2])
yaw_atanx = 1 - 2 * (q[..., 2] * q[..., 2] + q[..., 3] * q[..., 3])
yaw = torch_safe_atan2(yaw_atany, yaw_atanx, eps)
return yaw
@torch.jit.script
def get_yaw_q(q):
yaw = get_yaw(q)
angle_axis = torch.cat([torch.zeros(yaw.shape + (2,), device=q.device), yaw.unsqueeze(-1)], dim=-1)
heading_q = angle_axis_to_quaternion(angle_axis)
return heading_q
@torch.jit.script
def get_heading(q, eps: float = 1e-6):
heading_atany = q[..., 3]
heading_atanx = q[..., 0]
heading = 2 * torch_safe_atan2(heading_atany, heading_atanx, eps)
return heading
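# Project q onto a pure z-axis (heading) rotation by zeroing its x and y parts and renormalizing.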
def get_heading_q(q):
q_new = q.clone()
q_new[..., 1] = 0
q_new[..., 2] = 0
q_new = normalize(q_new)
return q_new
@torch.jit.script
def heading_to_vec(h_theta):
v = torch.stack([torch.cos(h_theta), torch.sin(h_theta)], dim=-1)
return v
@torch.jit.script
def vec_to_heading(h_vec):
h_theta = torch_safe_atan2(h_vec[..., 1], h_vec[..., 0])
return h_theta
@torch.jit.script
def heading_to_quat(h_theta):
angle_axis = torch.cat([torch.zeros(h_theta.shape + (2,), device=h_theta.device), h_theta.unsqueeze(-1)], dim=-1)
heading_q = angle_axis_to_quaternion(angle_axis)
return heading_q
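# Remove the heading (z-axis) component from q by left-multiplying with the conjugate of its heading quaternion.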
def deheading_quat(q, heading_q=None):
if heading_q is None:
heading_q = get_heading_q(q)
dq = quat_mul(quat_conjugate(heading_q), q)
return dq
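# 6D rotation representation: the first two columns of the rotation matrix, concatenated.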
@torch.jit.script
def rotmat_to_rot6d(mat):
rot6d = torch.cat([mat[..., 0], mat[..., 1]], dim=-1)
return rot6d
@torch.jit.script
def rot6d_to_rotmat(rot6d, eps: float = 1e-8):
a1 = rot6d[..., :3].clone()
a2 = rot6d[..., 3:].clone()
ind = torch.norm(a1, dim=-1) < eps
a1[ind] = torch.tensor([1.0, 0.0, 0.0], device=a1.device)
b1 = normalize(a1)
b2 = normalize(a2 - (b1 * a2).sum(dim=-1).unsqueeze(-1) * b1)
ind = torch.norm(b2, dim=-1) < eps
b2[ind] = torch.tensor([0.0, 1.0, 0.0], device=b2.device)
b3 = torch.cross(b1, b2, dim=-1)
mat = torch.stack([b1, b2, b3], dim=-1)
return mat
@torch.jit.script
def angle_axis_to_rot6d(aa):
return rotmat_to_rot6d(angle_axis_to_rotation_matrix(aa))
@torch.jit.script
def rot6d_to_angle_axis(rot6d):
|
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
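# Hamilton product of two quaternion tensors stored in (w, x, y, z) order.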
@torch.jit.script
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
return torch.stack([w, x, y, z], dim=-1).view(shape)
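# Quaternion conjugate: keep the scalar part, negate the vector part.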
@torch.jit.script
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((a[:, 0:1], -a[:, 1:]), dim=-1).view(shape)
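# Rotate 3D vectors b by quaternions a using the cross-product form (no rotation matrices built).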
@torch.jit.script
def quat_apply(a, b):
shape = b.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 3)
xyz = a[:, 1:].clone()
t = xyz.cross(b, dim=-1) * 2
return (b + a[:, 0:1].clone() * t + xyz.cross(t, dim=-1)).view(shape)
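# Rotation angle in radians encoded by a unit quaternion, recovered from its scalar part.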
@torch.jit.script
def quat_angle(a, eps: float = 1e-6):
shape = a.shape
a = a.reshape(-1, 4)
s = 2 * (a[:, 0] ** 2) - 1
s = s.clamp(-1 + eps, 1 - eps)
s = s.acos()
return s.view(shape[:-1])
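# Angular distance between the rotations represented by quat1 and quat2.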
@torch.jit.script
def quat_angle_diff(quat1, quat2):
return quat_angle(quat_mul(quat1, quat_conjugate(quat2)))
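# atan2 that nudges y by eps when both inputs are near zero, avoiding the degenerate (0, 0) case.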
@torch.jit.script
def torch_safe_atan2(y, x, eps: float = 1e-8):
y = y.clone()
y[(y.abs() < eps) & (x.abs() < eps)] += eps
return torch.atan2(y, x)
@torch.jit.script
def ypr_euler_from_quat(q, handle_singularity: bool = False, eps: float = 1e-6, singular_eps: float = 1e-6):
"""
    convert a quaternion (w, x, y, z) to euler angles, returned stacked as [roll, pitch, yaw]
"""
yaw_atany = 2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2])
yaw_atanx = 1 - 2 * (q[..., 2] * q[..., 2] + q[..., 3] * q[..., 3])
roll_atany = 2 * (q[..., 0] * q[..., 1] + q[..., 2] * q[..., 3])
roll_atanx = 1 - 2 * (q[..., 1] * q[..., 1] + q[..., 2] * q[..., 2])
yaw = torch_safe_atan2(yaw_atany, yaw_atanx, eps)
pitch = torch.asin(torch.clamp(2 * (q[..., 0] * q[..., 2] - q[..., 1] * q[..., 3]), min=-1 + eps, max=1 - eps))
roll = torch_safe_atan2(roll_atany, roll_atanx, eps)
if handle_singularity:
""" handle two special cases """
test = q[..., 0] * q[..., 2] - q[..., 1] * q[..., 3]
# north pole, pitch ~= 90 degrees
np_ind = test > 0.5 - singular_eps
if torch.any(np_ind):
# print('ypr_euler_from_quat singularity -- north pole!')
roll[np_ind] = 0.0
            pitch[np_ind] = pitch[np_ind].clamp_max(0.5 * np.pi)
yaw_atany = q[..., 3][np_ind]
yaw_atanx = q[..., 0][np_ind]
yaw[np_ind] = 2 * torch_safe_atan2(yaw_atany, yaw_atanx, eps)
# south pole, pitch ~= -90 degrees
sp_ind = test < -0.5 + singular_eps
if torch.any(sp_ind):
# print('ypr_euler_from_quat singularity -- south pole!')
roll[sp_ind] = 0.0
            pitch[sp_ind] = pitch[sp_ind].clamp_min(-0.5 * np.pi)
yaw_atany = q[..., 3][sp_ind]
yaw_atanx = q[..., 0][sp_ind]
yaw[sp_ind] = 2 * torch_safe_atan2(yaw_atany, yaw_atanx, eps)
return torch.stack([roll, pitch, yaw], dim=-1)
@torch.jit.script
def quat_from_ypr_euler(angles):
"""
    convert euler angles given as [roll, pitch, yaw] to a quaternion (w, x, y, z)
"""
half_ang = angles * 0.5
sin = torch.sin(half_ang)
cos = torch.cos(half_ang)
q = torch.stack([
cos[..., 0] * cos[..., 1] * cos[..., 2] + sin[..., 0] * sin[..., 1] * sin[..., 2],
sin[..., 0] * cos[..., 1] * cos[..., 2] - cos[..., 0] * sin[..., 1] * sin[..., 2],
cos[..., 0] * sin[..., 1] * cos[..., 2] + sin[..., 0] * cos[..., 1] * sin[..., 2],
cos[..., 0] * cos[..., 1] * sin[..., 2] - sin[..., 0] * sin[..., 1] * cos[..., 2]
], dim=-1)
return q
def quat_between_two_vec(v1, v2, eps: float = 1e-6):
"""
    quaternion (w, x, y, z) for rotating v1 onto v2; both inputs are expected to be unit vectors
"""
orig_shape = v1.shape
v1 = v1.reshape(-1, 3)
v2 = v2.reshape(-1, 3)
dot = (v1 * v2).sum(-1)
cross = torch.cross(v1, v2, dim=-1)
out = torch.cat([(1 + dot).unsqueeze(-1), cross], dim=-1)
# handle v1 & v2 with same direction
sind = dot > 1 - eps
out[sind] = torch.tensor([1., 0., 0., 0.], device=v1.device)
# handle v1 & v2 with opposite direction
nind = dot < -1 + eps
if torch.any(nind):
vx = torch.tensor([1., 0., 0.], device=v1.device)
vxdot = (v1 * vx).sum(-1).abs()
nxind = nind & (vxdot < 1 - eps)
if torch.any(nxind):
out[nxind] = angle_axis_to_quaternion(normalize(torch.cross(vx.expand_as(v1[nxind]), v1[nxind], dim=-1)) * np.pi)
    # handle v1 & v2 with opposite directions that are also parallel to the x axis
pind = nind & (vxdot >= 1 - eps)
if torch.any(pind):
vy = torch.tensor([0., 1., 0.], device=v1.device)
out[pind] = angle_axis_to_quaternion(normalize(torch.cross(vy.expand_as(v1[pind]), v1[pind], dim=-1)) * np.pi)
# normalize and reshape
out = normalize(out).view(orig_shape[:-1] + (4,))
return out
@torch.jit.script
def get_yaw(q, eps: float = 1e-6):
yaw_atany = 2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2])
yaw_atanx = 1 - 2 * (q[..., 2] * q[..., 2] + q[..., 3] * q[..., 3])
yaw = torch_safe_atan2(yaw_atany, yaw_atanx, eps)
return yaw
@torch.jit.script
def get_yaw_q(q):
yaw = get_yaw(q)
angle_axis = torch.cat([torch.zeros(yaw.shape + (2,), device=q.device), yaw.unsqueeze(-1)], dim=-1)
heading_q = angle_axis_to_quaternion(angle_axis)
return heading_q
@torch.jit.script
def get_heading(q, eps: float = 1e-6):
heading_atany = q[..., 3]
heading_atanx = q[..., 0]
heading = 2 * torch_safe_atan2(heading_atany, heading_atanx, eps)
return heading
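# Project q onto a pure z-axis (heading) rotation by zeroing its x and y parts and renormalizing.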
def get_heading_q(q):
q_new = q.clone()
q_new[..., 1] = 0
q_new[..., 2] = 0
q_new = normalize(q_new)
return q_new
@torch.jit.script
def heading_to_vec(h_theta):
v = torch.stack([torch.cos(h_theta), torch.sin(h_theta)], dim=-1)
return v
@torch.jit.script
def vec_to_heading(h_vec):
h_theta = torch_safe_atan2(h_vec[..., 1], h_vec[..., 0])
return h_theta
@torch.jit.script
def heading_to_quat(h_theta):
angle_axis = torch.cat([torch.zeros(h_theta.shape + (2,), device=h_theta.device), h_theta.unsqueeze(-1)], dim=-1)
heading_q = angle_axis_to_quaternion(angle_axis)
return heading_q
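# Remove the heading (z-axis) component from q by left-multiplying with the conjugate of its heading quaternion.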
def deheading_quat(q, heading_q=None):
if heading_q is None:
heading_q = get_heading_q(q)
dq = quat_mul(quat_conjugate(heading_q), q)
return dq
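# 6D rotation representation: the first two columns of the rotation matrix, concatenated.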
@torch.jit.script
def rotmat_to_rot6d(mat):
rot6d = torch.cat([mat[..., 0], mat[..., 1]], dim=-1)
return rot6d
@torch.jit.script
def rot6d_to_rotmat(rot6d, eps: float = 1e-8):
a1 = rot6d[..., :3].clone()
a2 = rot6d[..., 3:].clone()
ind = torch.norm(a1, dim=-1) < eps
a1[ind] = torch.tensor([1.0, 0.0, 0.0], device=a1.device)
b1 = normalize(a1)
b2 = normalize(a2 - (b1 * a2).sum(dim=-1).unsqueeze(-1) * b1)
ind = torch.norm(b2, dim=-1) < eps
b2[ind] = torch.tensor([0.0, 1.0, 0.0], device=b2.device)
b3 = torch.cross(b1, b2, dim=-1)
mat = torch.stack([b1, b2, b3], dim=-1)
return mat
@torch.jit.script
def angle_axis_to_rot6d(aa):
return rotmat_to_rot6d(angle_axis_to_rotation_matrix(aa))
@torch.jit.script
def rot6d_to_angle_axis(rot6d): | return rotation_matrix_to_angle_axis(rot6d_to_rotmat(rot6d)) | 4 | 2023-10-30 20:43:43+00:00 | 12k |
YARAHQ/yara-forge | yara-forge.py | [
{
"identifier": "retrieve_yara_rule_sets",
"path": "main/rule_collector.py",
"snippet": "def retrieve_yara_rule_sets(repo_staging_dir, yara_repos):\n \"\"\"\n Retrieves YARA rules from online repositories.\n \"\"\"\n\n # The list of YARA rule sets of all repositories\n yara_rule_repo_sets = []\n\n # Check if the directory exists\n if os.path.exists(repo_staging_dir):\n # Remove the existing repo directory and all its contents\n shutil.rmtree(os.path.join(repo_staging_dir), ignore_errors=False)\n\n # Loop over the repositories\n for repo in yara_repos:\n\n # Output the repository information to the console in a single line\n logging.info(\"Retrieving YARA rules from repository: %s\", repo['name'])\n\n # Extract the owner and the repository name from the URL\n repo_url_parts = repo['url'].split(\"/\")\n repo['owner'] = repo_url_parts[3]\n repo['repo'] = repo_url_parts[4].split(\".\")[0]\n\n # If the repository hasn't not been cloned yet, clone it\n if not os.path.exists(os.path.join(repo_staging_dir, repo['owner'], repo['repo'])):\n # Clone the repository\n repo_folder = os.path.join(repo_staging_dir, repo['owner'], repo['repo'])\n repo['commit_hash'] = Repo.clone_from(repo['url'], repo_folder, branch=repo['branch']).head.commit.hexsha\n else:\n # Get the latest commit hash\n repo_folder = os.path.join(repo_staging_dir, repo['owner'], repo['repo'])\n repo['commit_hash'] = Repo(repo_folder).head.commit.hexsha\n\n # Walk through the extracted folders and find a LICENSE file\n # and save it into the repository object\n repo['license'] = \"NO LICENSE SET\"\n repo['license_url'] = \"N/A\"\n for root, dir, files in os.walk(repo_folder):\n for file in files:\n if file == \"LICENSE\" or file == \"LICENSE.txt\" or file == \"LICENSE.md\":\n file_path = os.path.join(root, file)\n url_path = os.path.relpath(file_path, start=repo_folder)\n if root == repo_folder: # Check if the file is in the root directory\n repo['license_url'] = f'{repo[\"url\"]}/blob/{repo[\"commit_hash\"]}/{url_path}'\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n repo['license'] = f.read()\n break # if we found the license in the root directory, we don't need to look further\n elif 'license_url' not in repo: # If the file is not in the root directory and no license has been found yet\n repo['license_url'] = f'{repo[\"url\"]}/blob/{repo[\"commit_hash\"]}/{url_path}'\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n repo['license'] = f.read()\n\n # Walk through the extracted folders and find all YARA files\n yara_rule_sets = []\n # Walk a sub folder if one is set in the config\n walk_folder = repo_folder\n if 'path' in repo:\n walk_folder = os.path.join(repo_folder, repo['path'])\n # Walk the folder and find all YARA files\n for root, _, files in os.walk(walk_folder):\n for file in files:\n if file.endswith(\".yar\") or file.endswith(\".yara\"):\n file_path = os.path.join(root, file)\n\n # Debug output\n logging.debug(\"Found YARA rule file: %s\", file_path)\n\n # Read the YARA file\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n yara_file_content = f.read()\n # Parse the rules in the file\n try:\n # Get the rule file path in the repository\n relative_path = os.path.relpath(file_path, start=repo_folder)\n # Parse the YARA rules in the file\n yara_parser = plyara.Plyara()\n yara_rules = yara_parser.parse_string(yara_file_content)\n # Create a YARA rule set object\n yara_rule_set = {\n \"rules\": yara_rules,\n \"file_path\": relative_path,\n }\n # Debug output\n logging.debug(\"Found %d YARA rules in file: %s\",\n len(yara_rules), file_path)\n # Append to list of YARA rule sets\n 
yara_rule_sets.append(yara_rule_set)\n #pprint(yara_rule_set)\n\n except Exception as e:\n print(e)\n logging.error(\"Skipping YARA rule in the following \" \\\n \"file because of a syntax error: %s \", file_path)\n\n # Append the YARA rule repository\n yara_rule_repo = {\n \"name\": repo['name'],\n \"url\": repo['url'],\n \"author\": repo['author'],\n \"owner\": repo['owner'],\n \"repo\": repo['repo'],\n \"branch\": repo['branch'],\n \"rules_sets\": yara_rule_sets,\n \"quality\": repo['quality'],\n \"license\": repo['license'],\n \"license_url\": repo['license_url'],\n \"commit_hash\": repo['commit_hash'],\n \"retrieval_date\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"repo_path\": repo_folder,\n }\n yara_rule_repo_sets.append(yara_rule_repo)\n\n # Output the number of YARA rules retrieved from the repository\n logging.info(\"Retrieved %d YARA rules from repository: %s\",\n len(yara_rule_sets), repo['name'])\n\n # Return the YARA rule sets\n return yara_rule_repo_sets"
},
{
"identifier": "process_yara_rules",
"path": "main/rule_processors.py",
"snippet": "def process_yara_rules(yara_rule_repo_sets, YARA_FORGE_CONFIG):\n \"\"\"\n Processes the YARA rules\n \"\"\"\n\n # Logic hash list to avoid duplicates\n logic_hash_list = {}\n\n # Loop over the repositories\n for repo in yara_rule_repo_sets:\n\n # Rule set identifier\n rule_set_id = repo['name'].replace(\" \", \"_\").replace(\"-\", \"_\").upper()\n\n # Debug output\n logging.info(\"Processing YARA rules from repository: %s\", repo['name'])\n\n # Loop over the rule sets in the repository and modify the rules\n num_rules = 0\n for rules in repo['rules_sets']:\n # Debug output\n logging.debug(\"Processing YARA rules from rule set: %s\", rules['file_path'])\n # Rules that we want to keep\n kept_rules = []\n # Loop over each of the rules and modify them\n for rule in rules['rules']:\n # Debug output\n logging.debug(\"Processing YARA rule: %s\", rule['rule_name'])\n\n # Rule Meta Data Modifications ----------------------------------------------\n\n # Check if the rule is a private rule\n is_private_rule = False\n if 'scopes' in rule:\n if 'private' in rule['scopes']:\n is_private_rule = True\n\n # Add metadata to rules that don't have any\n if 'metadata' not in rule:\n rule['metadata'] = []\n\n # Calculate the logic hash\n logic_hash = generate_hash(rule)\n\n # Duplicate Name Check\n # If the rule name already exists in the list, append a number to it\n if rule['rule_name'] in logic_hash_list.values():\n # Get the number of times the rule name already exists in the list\n num_rule_name = list(logic_hash_list.values()).count(rule['rule_name'])\n # Append the number to the rule name\n rule['rule_name'] = f\"{rule['rule_name']}_{num_rule_name}\"\n\n # Duplicate Content Check\n # Check if the rule is a duplicate (based on the logic hash)\n if logic_hash in logic_hash_list and not is_private_rule:\n logging.info(\"Skipping rule '%s > %s' because it has the same logic hash as '%s'\", \n repo['name'], rule['rule_name'], logic_hash_list[logic_hash])\n continue\n # Register the logic hash\n logic_hash_list[logic_hash] = rule['rule_name']\n modify_meta_data_value(rule['metadata'], 'logic_hash', logic_hash)\n\n # Calculate a UUID for the rule hash\n rule_uuid = generate_uuid_from_hash(logic_hash)\n align_yara_rule_uuid(rule['metadata'], rule_uuid)\n\n # Modifying existing meta data values ---------------------------------------\n\n # Modify the rule references\n rule['metadata'] = align_yara_rule_reference(rule['metadata'], repo['url'])\n\n # Modify the rule date\n rule['metadata'] = align_yara_rule_date(rule['metadata'],\n repo['repo_path'],\n rules['file_path'])\n\n # Modify the rule hashes\n rule['metadata'] = align_yara_rule_hashes(rule['metadata'])\n\n # # Modify the rule description\n rule['metadata'] = align_yara_rule_description(rule['metadata'], repo['name'])\n\n # Modify the rule author\n rule['metadata'] = align_yara_rule_author(rule['metadata'], repo['author'])\n\n # Add tags based on meta data values and condition elements\n rule = add_tags_to_rule(rule)\n\n # Add a score based on the rule quality and meta data keywords\n rule_score = evaluate_yara_rule_score(rule, YARA_FORGE_CONFIG)\n modify_meta_data_value(rule['metadata'], 'score', rule_score)\n\n # Increase the quality score based on certain rule characteristics\n quality_increase = evaluate_quality_increase(rule)\n rule['metadata'] = modify_yara_rule_quality(rule['metadata'], quality_increase )\n\n # Get a custom importance score if available\n custom_importance_score = retrieve_custom_importance_score(repo['name'], 
rules['file_path'], rule['rule_name'])\n if custom_importance_score:\n modify_meta_data_value(rule['metadata'], 'importance', custom_importance_score)\n logging.debug(\"Custom importance score for rule %s is %d\", rule['rule_name'], custom_importance_score)\n\n # Adding additional meta data values ----------------------------------------\n # Add a quality value based on the original repo\n # a quality reduction is evaluated later in the process - this is just the base value\n # for that calculation\n modify_meta_data_value(rule['metadata'], 'quality', repo['quality'])\n\n # Modify the rule name\n rule_name_old = rule['rule_name']\n rule_name_new = align_yara_rule_name(rule['rule_name'], rule_set_id)\n # If the rule is private, add the _PRIVATE suffix and\n if is_private_rule:\n rule_name_new = f\"{rule_name_new}_PRIVATE\"\n # Add the rule to the private rule mapping\n private_rule_mapping.append({\n \"repo\": rule_set_id,\n \"old_name\": rule_name_old,\n \"new_name\": rule_name_new,\n \"rule\": rule\n })\n # Set the new rule name\n rule['rule_name'] = rule_name_new\n\n # Check if the rule uses private rules\n private_rules_used = check_rule_uses_private_rules(rule_set_id, rule, private_rule_mapping)\n if private_rules_used:\n # Change the condition terms of the rule to align them with\n # the new private rule names\n rule['condition_terms'] = adjust_identifier_names(\n rule_set_id,\n rule['condition_terms'],\n private_rules_used)\n # Add the private rules used to the rule\n rule['private_rules_used'] = private_rules_used\n logging.debug(\"Private rules used: %s\", private_rules_used)\n\n # Add a rule source URL to the original file\n modify_meta_data_value(\n rule['metadata'], 'source_url',\n (\n f'{repo[\"url\"]}/blob/{repo[\"commit_hash\"]}/{rules[\"file_path\"]}'\n f'#L{rule[\"start_line\"]}-L{rule[\"stop_line\"]}'\n )\n )\n\n # Add license URL\n modify_meta_data_value(rule['metadata'], 'license_url', repo['license_url'])\n\n # Sort the meta data values\n rule['metadata'] = sort_meta_data_values(rule['metadata'], YARA_FORGE_CONFIG)\n\n # We keep the rule\n kept_rules.append(rule)\n\n # Count the number of rules\n num_rules += len(kept_rules)\n # Now we replace the rules\n rules['rules'] = kept_rules\n\n # Info output about the number of rules in the repository\n logging.info(\"Normalized %d rules from repository: %s\", num_rules, repo['name'])\n\n return yara_rule_repo_sets"
},
{
"identifier": "write_yara_packages",
"path": "main/rule_output.py",
"snippet": "def write_yara_packages(processed_yara_repos, program_version, yaraqa_commit, YARA_FORGE_CONFIG):\n \"\"\"\n Writes YARA rules into separate files.\n \"\"\"\n\n # List of files that were written\n package_files = []\n\n rule_package_statistics_sets = []\n\n # Loop over the rule packages\n for rule_package in YARA_FORGE_CONFIG['yara_rule_packages']:\n\n # Statistics for the rule package\n rule_package_statistics = {\n \"total_rules\": 0,\n \"total_rules_skipped_age\": 0,\n \"total_rules_skipped_quality\": 0,\n \"total_rules_skipped_importance\": 0,\n \"total_rules_skipped_score\": 0,\n \"repo_statistics\": [],\n \"name\": rule_package['name'],\n }\n\n # Create the directory for the rule package\n package_dir = os.path.join(\"packages\", rule_package['name'])\n if not os.path.exists(package_dir):\n os.makedirs(package_dir)\n # Create the rule file name\n rule_file_name = f\"yara-rules-{rule_package['name']}.yar\"\n # Create the rule file path\n rule_file_path = os.path.join(package_dir, rule_file_name)\n\n # Write information about the rule package, the output file name\n # and the output file path to the console\n logging.info(\"------------------------------------------------------------------------\")\n logging.info(\"Creating YARA rule package '%s': %s\", rule_package['name'], rule_file_path)\n logging.info(\"Description: %s\", rule_package['description'])\n logging.info(\"Minimum Quality: %d\", rule_package['minimum_quality'])\n logging.info(\"Minimum Age: %d\", rule_package['minimum_age'])\n logging.info(\"Output File: %s\", rule_file_path)\n\n # List of strings composed of the rules from each repository\n output_rule_set_strings = []\n\n # Loop over the repositories\n for repo in processed_yara_repos:\n # Debug output\n logging.info(\"Writing YARA rules from repository: %s\", repo['name'])\n\n # Repo rule set string\n repo_rules_strings = []\n already_added_priv_rules = []\n\n # Statistics for the rule package\n rule_repo_statistics = {\n \"total_rules\": 0,\n \"total_rules_skipped_age\": 0,\n \"total_rules_skipped_quality\": 0,\n \"total_rules_skipped_importance\": 0,\n \"total_rules_skipped_score\": 0,\n }\n\n # Loop over the rule sets in the repository and modify the rules\n for rule_sets in repo['rules_sets']:\n # Debug output\n logging.debug(\"Writing YARA rules from rule set: %s\", rule_sets['file_path'])\n # List of required private rules\n required_private_rules = []\n # Loop over the rules in the rule set\n for rule in rule_sets['rules']:\n\n # Debug output\n #pprint(rule)\n\n # Perform some check based on the meta data of the rule\n skip_rule = False\n skip_rule_reason = None\n # Some values that will help with the decision whether to skip the rule\n importance = None\n # Loop over the metadata\n for metadata in rule['metadata']:\n\n # Age check ------------------------------------------------------\n # Check if the rule has a minimum age\n if \"modified\" in metadata:\n rule_date = dateparser.parse(metadata['modified'])\n # Check if the rule is old enough\n if (datetime.datetime.now() - rule_date).days < rule_package['minimum_age']:\n skip_rule = True\n skip_rule_reason = \"age\"\n # Check if the rule is younger than the maximum age\n if \"date\" in metadata:\n rule_date = dateparser.parse(metadata['date'])\n # Check if the rule is old enough\n if (datetime.datetime.now() - rule_date).days > rule_package['max_age']:\n skip_rule = True\n skip_rule_reason = \"age\"\n\n # Score check ----------------------------------------------------\n if \"score\" in metadata:\n 
# Check if the rule has the require score\n if metadata['score'] < rule_package['minimum_score']:\n skip_rule = True\n skip_rule_reason = \"score\"\n\n # Quality check --------------------------------------------------\n if \"quality\" in metadata:\n # Check if the rule has the require quality\n if metadata['quality'] < rule_package['minimum_quality']:\n skip_rule = True\n skip_rule_reason = \"quality\"\n \n # Importance check -----------------------------------------------\n if \"importance\" in metadata:\n importance = metadata['importance']\n\n # If importance is set, check the importance level defined for the repo and overwrite\n # the skip_rule variable if the importance of the rule is higher than the importance\n # defined for the rule package\n if importance is not None:\n if importance >= rule_package['force_include_importance_level']:\n skip_rule = False\n skip_rule_reason = None\n logging.debug(\"Forcing rule '%s' because of importance\", rule['rule_name'])\n if importance < rule_package['force_exclude_importance_level']:\n skip_rule = True\n skip_rule_reason = \"importance\"\n\n # We skip private rules and add them only if other rules require them\n if 'scopes' in rule:\n if 'private' in rule['scopes']:\n skip_rule = True\n\n # Skip the rule if it doesn't match the minimum quality or age\n if skip_rule:\n logging.debug(\"Skipping rule '%s' because of %s\", rule['rule_name'], skip_rule_reason)\n if skip_rule_reason == \"age\":\n rule_repo_statistics['total_rules_skipped_age'] += 1\n elif skip_rule_reason == \"quality\":\n rule_repo_statistics['total_rules_skipped_quality'] += 1\n elif skip_rule_reason == \"importance\":\n rule_repo_statistics['total_rules_skipped_importance'] += 1\n elif skip_rule_reason == \"score\":\n rule_repo_statistics['total_rules_skipped_score'] += 1\n continue\n else:\n # Collect all private rules used in the accepted rules\n if 'private_rules_used' in rule:\n for priv_rule in rule['private_rules_used']:\n if priv_rule not in required_private_rules:\n required_private_rules.append(priv_rule)\n\n # Write the rule into the output file\n repo_rules_strings.append(rebuild_yara_rule(rule))\n rule_repo_statistics['total_rules'] += 1\n \n # Now we prepare the private rules\n # Loop over the required private rules\n for priv_rule in required_private_rules:\n # Get the rule from the plyara object\n priv_rule_string = rebuild_yara_rule(priv_rule[\"rule\"])\n # Append rule if it hasn't been added yet\n if priv_rule[\"rule\"][\"rule_name\"] not in already_added_priv_rules:\n # Prepend the rule to the output string\n repo_rules_strings.insert(0, priv_rule_string)\n # Add the rule to the list of already added rules\n already_added_priv_rules.append(priv_rule[\"rule\"][\"rule_name\"])\n rule_repo_statistics['total_rules'] += 1\n\n # Only write the rule set if there's at least one rule in the set\n if len(repo_rules_strings) > 0:\n # Prepend header to the output string\n repo_rule_set_header = YARA_FORGE_CONFIG['repo_header'].format(\n repo_name=repo['name'],\n repo_url=repo['url'],\n retrieval_date=datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n repo_commit=repo['commit_hash'],\n total_rules=rule_repo_statistics['total_rules'],\n total_rules_skipped_age=rule_repo_statistics['total_rules_skipped_age'],\n total_rules_skipped_quality=rule_repo_statistics['total_rules_skipped_quality'],\n total_rules_skipped_importance=rule_repo_statistics['total_rules_skipped_importance'],\n total_rules_skipped_score=rule_repo_statistics['total_rules_skipped_score'],\n 
repo_license=repo['license']\n )\n # Append the rule set string to the list of rule set strings\n output_rule_set_strings.append(repo_rule_set_header)\n output_rule_set_strings.extend(repo_rules_strings)\n \n # Write the rule set statistics including total and skipped rules to the console\n logging.info(\"Rule set: '%s' Total rules: %d, Skipped: %d (age), %d (quality), %d (importance), %d (score)\",\n repo['name'],\n rule_repo_statistics['total_rules'],\n rule_repo_statistics['total_rules_skipped_age'],\n rule_repo_statistics['total_rules_skipped_quality'],\n rule_repo_statistics['total_rules_skipped_importance'],\n rule_repo_statistics['total_rules_skipped_score'])\n\n # Add the repo statistics to the rule package statistics\n rule_package_statistics['repo_statistics'].append({\n \"name\": repo['name'],\n \"total_rules\": rule_repo_statistics['total_rules'],\n \"total_rules_skipped_age\": rule_repo_statistics['total_rules_skipped_age'],\n \"total_rules_skipped_quality\": rule_repo_statistics['total_rules_skipped_quality'],\n \"total_rules_skipped_importance\": rule_repo_statistics['total_rules_skipped_importance'],\n \"total_rules_skipped_score\": rule_repo_statistics['total_rules_skipped_score'],\n })\n\n # Add the repo statistics counters to the the rule package statistics\n rule_package_statistics['total_rules'] += rule_repo_statistics['total_rules']\n rule_package_statistics['total_rules_skipped_age'] += rule_repo_statistics['total_rules_skipped_age']\n rule_package_statistics['total_rules_skipped_quality'] += rule_repo_statistics['total_rules_skipped_quality']\n rule_package_statistics['total_rules_skipped_importance'] += rule_repo_statistics['total_rules_skipped_importance']\n rule_package_statistics['total_rules_skipped_score'] += rule_repo_statistics['total_rules_skipped_score']\n\n # Print the rule package statistics including total and skipped rules to the console\n logging.log(logging.INFO, \"-------------------------------------------------------\")\n logging.info(\"Rule package: '%s' Total rules: %d, Skipped: %d (age), %d (quality), %d (importance), %d (score)\",\n rule_package['name'],\n rule_package_statistics['total_rules'],\n rule_package_statistics['total_rules_skipped_age'],\n rule_package_statistics['total_rules_skipped_quality'],\n rule_package_statistics['total_rules_skipped_importance'],\n rule_package_statistics['total_rules_skipped_score'])\n\n # Add the rule package statistics to the list of rule package statistics\n rule_package_statistics_sets.append(rule_package_statistics)\n\n # Only write the rule file if there's at least one rule in all sets in the package\n if rule_package_statistics['total_rules'] > 0:\n with open(rule_file_path, \"w\", encoding=\"utf-8\") as f:\n\n # Compose the package header and add the statistics on total rules and skipped rules\n rule_set_header = YARA_FORGE_CONFIG['rule_set_header'].format(\n rule_package_name=rule_package['name'],\n rule_package_description=rule_package['description'],\n program_version=program_version,\n yaraqa_commit=yaraqa_commit,\n rule_package_minimum_quality=rule_package['minimum_quality'],\n rule_package_force_include_importance_level=rule_package['force_include_importance_level'],\n rule_package_force_exclude_importance_level=rule_package['force_exclude_importance_level'],\n rule_package_minimum_age=rule_package['minimum_age'],\n rule_package_minimum_score=rule_package['minimum_score'],\n retrieval_date=datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n total_rules=rule_package_statistics['total_rules'],\n 
total_rules_skipped_age=rule_package_statistics['total_rules_skipped_age'],\n total_rules_skipped_quality=rule_package_statistics['total_rules_skipped_quality'],\n total_rules_skipped_importance=rule_package_statistics['total_rules_skipped_importance'],\n total_rules_skipped_score=rule_package_statistics['total_rules_skipped_score'],\n )\n\n logging.log(logging.INFO, \"You can find more information about skipped files \" \\\n \"in the log file: yara-forge.log when you run it with --debug flag\")\n\n # Prepend the header to the output rule set strings\n output_rule_set_strings.insert(0, rule_set_header)\n\n # Write the output rule set strings to the file\n f.write(\"\".join(output_rule_set_strings))\n\n else:\n # remove the output file if it exists\n if os.path.exists(rule_file_path):\n os.remove(rule_file_path)\n\n # Add the name of the repo and the file path to the output file to the list\n package_files.append({\n \"name\": rule_package['name'],\n \"file_path\": rule_file_path,\n })\n\n # Write the rule package statistics as a markdown table to the build_stats.md file\n write_build_stats(rule_package_statistics_sets)\n\n return package_files"
},
{
"identifier": "evaluate_rules_quality",
"path": "qa/rule_qa.py",
"snippet": "def evaluate_rules_quality(processed_yara_repos, config):\n \"\"\"\n Evaluates the quality of YARA rules.\n \"\"\"\n\n # Create a yaraQA object\n yara_qa = YaraQA()\n\n # Rule issues list\n repo_issues = {}\n\n # Create a copy of the the repos to work with\n processed_yara_repos_copy = processed_yara_repos.copy()\n\n # Loop over the repositories\n for repo_rule_sets in processed_yara_repos_copy:\n # Analyze the rule sets\n logging.info(\"Evaluating rules from repository: %s\", repo_rule_sets['name'])\n # Issue statistics \n issue_statistics = {\n \"issues_syntax\": 0,\n \"issues_efficiency\": 0,\n \"issues_performance\": 0,\n \"issues_critical\": 0,\n }\n\n # Loop over the rule sets in the repository\n for rule_set in repo_rule_sets['rules_sets']:\n logging.debug(\"Evaluating rules from rule set: {rule_set['file_path']}\")\n\n rules_without_errors = []\n\n # Now we do stuff with each rule\n for rule in rule_set['rules']:\n\n # Skip the rule if it has critical issues\n skip_rule = False\n\n # Analyze the rule syntax\n # - Critical errors\n # - Compile issues\n issues_critical = check_issues_critical(rule)\n # Rule has critical issues\n if issues_critical:\n # Adding the values to the statistics\n issue_statistics['issues_critical'] += len(issues_critical)\n logging.warning(\"Rule %s has critical issues and cannot be used: %s\", rule['rule_name'], issues_critical)\n skip_rule = True\n\n # Analyze the rule syntax\n # - Syntactical issues\n # - Compile issues\n issues_syntax = check_syntax_issues(rule)\n # Print the issues if debug is enabled\n logging.debug(\"Evaluated rule %s syntax issues: %s\",\n rule['rule_name'], issues_syntax)\n\n # Analyze the rule quality\n # Checks for\n # - Performance impact issues (based on experience)\n # - Resource usage issues (based on experience)\n # - Logic flaws (based on experience)\n issues_efficiency = yara_qa.analyze_rule(rule)\n # Print the issues if debug is enabled\n logging.debug(\"Evaluated rule %s efficiency issues: %s\",\n rule['rule_name'], issues_efficiency)\n\n # Analyze the rule performance\n # Checks for \n # - Performance issues with live tests\n issues_performance = yara_qa.analyze_live_rule_performance(rule)\n # Add the values to the statistics\n issue_statistics['issues_performance'] += len(issues_performance)\n\n # Reduce the rule's quality score based on the levels of \n # the issues found in the rules\n issues = issues_syntax + issues_efficiency + issues_performance + issues_critical\n # Adding the values to the statistics\n issue_statistics['issues_syntax'] += len(issues_syntax)\n issue_statistics['issues_efficiency'] += len(issues_efficiency)\n # Loop over the issues\n for issue in issues:\n issue['score'] = config['issue_levels'][issue['level']]\n # Calculate the total score\n total_quality_score = sum(issue['score'] for issue in issues)\n\n # Apply a custom quality reduction if the rule has shown to be\n # prone to false positives\n custom_score_reduction = retrieve_custom_quality_reduction(rule)\n total_quality_score += custom_score_reduction\n\n # Debug output report the total score of a rule\n logging.debug(\"Rule %s total score: %d\", rule['rule_name'], total_quality_score)\n\n # Add the total score to the rule's quality score\n rule['metadata'] = modify_yara_rule_quality(rule['metadata'], total_quality_score)\n\n # Add all issues to the big list of issues\n if repo_rule_sets['name'] in repo_issues:\n repo_issues[repo_rule_sets['name']].extend(issues)\n else:\n repo_issues[repo_rule_sets['name']] = issues\n\n # Add 
the rule to the list of rules without errors\n if not skip_rule:\n rules_without_errors.append(rule)\n\n # Replace the rules in the rule set with the rules without errors\n rule_set['rules'] = rules_without_errors\n\n # Print the issues statistics\n logging.info(\"Issues statistics: %d syntax issues, %d efficiency issues, \" +\n \"%d performance issues, %d critical issues\",\n issue_statistics['issues_syntax'],\n issue_statistics['issues_efficiency'],\n issue_statistics['issues_performance'],\n issue_statistics['issues_critical'])\n\n # Log the issues found in the rules to a separate file\n write_issues_to_file(repo_issues)\n\n # Return the processed repos\n return processed_yara_repos_copy"
},
{
"identifier": "check_yara_packages",
"path": "qa/rule_qa.py",
"snippet": "def check_yara_packages(repo_files):\n \"\"\"\n Checks the YARA packages for errors.\n \"\"\"\n # Loop over the list and print the file names\n for repo_file in repo_files:\n logging.info(\"Checking YARA package '%s' in file: %s\", \n repo_file['name'], repo_file['file_path'])\n # Compile the rule set\n try:\n # Check for errors\n yara.compile(filepath=repo_file['file_path'])\n except Exception as e:\n logging.error(\"The rule set didn't compile without errors: %s\", e)\n return False\n return True"
},
{
"identifier": "get_yara_qa_commit_hash",
"path": "qa/rule_qa.py",
"snippet": "def get_yara_qa_commit_hash():\n \"\"\"\n Returns the current commit hash of the lst commit of the YARA QA sub repository.\n \"\"\"\n # Get the current commit hash of the YARA QA sub repository\n try:\n with open(\".git/modules/qa/yaraQA/refs/heads/main\", \"r\", encoding=\"utf-8\") as f:\n return f.read().strip()\n except Exception as e:\n logging.warning(\"Couldn't get the commit hash of the YARA QA repository: %s\", e)\n return \"unknown\""
}
] | import argparse
import logging
import sys
import yaml
from main.rule_collector import retrieve_yara_rule_sets
from main.rule_processors import process_yara_rules
from main.rule_output import write_yara_packages
from qa.rule_qa import evaluate_rules_quality, check_yara_packages, get_yara_qa_commit_hash | 8,458 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# -*- coding: utf-8 -*-
#
# YARA Forge
# A YARA Rule Concentrator
# Florian Roth
# January 2024
__version__ = '0.7.2'
#import pprint
# Write a section header with dividers
def write_section_header(title, divider_with=72):
print("\n" + "=" * divider_with)
print(title.center(divider_with).upper())
print("=" * divider_with + "\n")
if __name__ == "__main__":
print(r' __ _____ ____ ___ ______ ')
print(r' \ \/ / | / __ \/ | / ____/___ _________ ____ ')
print(r' \ / /| | / /_/ / /| | / /_ / __ \/ ___/ __ `/ _ \ ')
print(r' / / ___ |/ _, _/ ___ | / __/ / /_/ / / / /_/ / __/ ')
print(r' /_/_/ |_/_/ |_/_/ |_| /_/ \____/_/ \__, /\___/ ')
print(r' /____/ ')
print(r' YARA Forge ')
    print(r'  Bringing Order to Chaos                                   ')
print(r' ')
print(r' Version %s ' % __version__)
print(r' Florian Roth, January 2024 ')
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="enable debug output", action="store_true")
parser.add_argument("-c", "--config", help="specify a different config file", default="yara-forge-config.yml")
args = parser.parse_args()
    # Create a new logger that logs to the command line and to a log file named yara-forge.log
# (only set the level to debug if the debug argument is set)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
# Set the level of the plyara logger to warning
logging.getLogger('plyara').setLevel(logging.WARNING)
logging.getLogger('tzlocal').setLevel(logging.CRITICAL)
# Create a handler for the command line
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG if args.debug else logging.INFO)
# Create a handler for the log file
fh = logging.FileHandler("yara-forge.log")
fh.setLevel(logging.DEBUG)
# Create a formatter for the log messages that go to the log file
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Create a formatter for the log messages that go to the command line
formatter_cmd = logging.Formatter('%(message)s')
# Add the formatter to the handlers
ch.setFormatter(formatter_cmd)
fh.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
# Read configuration file
with open(args.config, 'r') as f:
YARA_FORGE_CONFIG = yaml.safe_load(f)
# Retrieve the YARA rule sets
write_section_header("Retrieving YARA rule sets")
yara_rule_repo_sets = retrieve_yara_rule_sets(
YARA_FORGE_CONFIG['repo_staging_dir'],
YARA_FORGE_CONFIG['yara_repositories'])
#pprint.pprint(yara_rule_repo_sets)
# Process the YARA rules
write_section_header("Processing YARA rules")
processed_yara_repos = process_yara_rules(yara_rule_repo_sets, YARA_FORGE_CONFIG)
# Evaluate the quality of the rules
write_section_header("Evaluating YARA rules")
evaluated_yara_repos = evaluate_rules_quality(processed_yara_repos, YARA_FORGE_CONFIG)
# Write the YARA packages
write_section_header("Writing YARA packages")
| #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# -*- coding: utf-8 -*-
#
# YARA Forge
# A YARA Rule Concentrator
# Florian Roth
# January 2024
__version__ = '0.7.2'
#import pprint
# Write a section header with dividers
def write_section_header(title, divider_with=72):
print("\n" + "=" * divider_with)
print(title.center(divider_with).upper())
print("=" * divider_with + "\n")
if __name__ == "__main__":
print(r' __ _____ ____ ___ ______ ')
print(r' \ \/ / | / __ \/ | / ____/___ _________ ____ ')
print(r' \ / /| | / /_/ / /| | / /_ / __ \/ ___/ __ `/ _ \ ')
print(r' / / ___ |/ _, _/ ___ | / __/ / /_/ / / / /_/ / __/ ')
print(r' /_/_/ |_/_/ |_/_/ |_| /_/ \____/_/ \__, /\___/ ')
print(r' /____/ ')
print(r' YARA Forge ')
    print(r'  Bringing Order to Chaos                                   ')
print(r' ')
print(r' Version %s ' % __version__)
print(r' Florian Roth, January 2024 ')
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="enable debug output", action="store_true")
parser.add_argument("-c", "--config", help="specify a different config file", default="yara-forge-config.yml")
args = parser.parse_args()
    # Create a new logger that logs to the command line and to a log file named yara-forge.log
# (only set the level to debug if the debug argument is set)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
# Set the level of the plyara logger to warning
logging.getLogger('plyara').setLevel(logging.WARNING)
logging.getLogger('tzlocal').setLevel(logging.CRITICAL)
# Create a handler for the command line
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG if args.debug else logging.INFO)
# Create a handler for the log file
fh = logging.FileHandler("yara-forge.log")
fh.setLevel(logging.DEBUG)
# Create a formatter for the log messages that go to the log file
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Create a formatter for the log messages that go to the command line
formatter_cmd = logging.Formatter('%(message)s')
# Add the formatter to the handlers
ch.setFormatter(formatter_cmd)
fh.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
# Read configuration file
with open(args.config, 'r') as f:
YARA_FORGE_CONFIG = yaml.safe_load(f)
# Retrieve the YARA rule sets
write_section_header("Retrieving YARA rule sets")
yara_rule_repo_sets = retrieve_yara_rule_sets(
YARA_FORGE_CONFIG['repo_staging_dir'],
YARA_FORGE_CONFIG['yara_repositories'])
#pprint.pprint(yara_rule_repo_sets)
# Process the YARA rules
write_section_header("Processing YARA rules")
processed_yara_repos = process_yara_rules(yara_rule_repo_sets, YARA_FORGE_CONFIG)
# Evaluate the quality of the rules
write_section_header("Evaluating YARA rules")
evaluated_yara_repos = evaluate_rules_quality(processed_yara_repos, YARA_FORGE_CONFIG)
# Write the YARA packages
write_section_header("Writing YARA packages") | repo_files = write_yara_packages(evaluated_yara_repos, program_version=__version__, yaraqa_commit=get_yara_qa_commit_hash(), YARA_FORGE_CONFIG=YARA_FORGE_CONFIG) | 5 | 2023-10-28 18:04:14+00:00 | 12k |
masked-spacetime-hashing/msth | MSTH/streamable_pipeline.py | [
{
"identifier": "base_config",
"path": "nerfstudio/configs/base_config.py",
"snippet": "class PrintableConfig: # pylint: disable=too-few-public-methods\nclass InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\nclass MachineConfig(PrintableConfig):\nclass LocalWriterConfig(InstantiateConfig):\nclass LoggingConfig(PrintableConfig):\nclass ViewerConfig(PrintableConfig):\n def __str__(self):\n def setup(self, **kwargs) -> Any:\n def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:"
},
{
"identifier": "DataManager",
"path": "nerfstudio/data/datamanagers/base_datamanager.py",
"snippet": "class DataManager(nn.Module):\n \"\"\"Generic data manager's abstract class\n\n This version of the data manager is designed be a monolithic way to load data and latents,\n especially since this may contain learnable parameters which need to be shared across the train\n and test data managers. The idea is that we have setup methods for train and eval separately and\n this can be a combined train/eval if you want.\n\n Usage:\n To get data, use the next_train and next_eval functions.\n This data manager's next_train and next_eval methods will return 2 things:\n 1. A Raybundle: This will contain the rays we are sampling, with latents and\n conditionals attached (everything needed at inference)\n 2. A \"batch\" of auxiliary information: This will contain the mask, the ground truth\n pixels, etc needed to actually train, score, etc the model\n\n Rationale:\n Because of this abstraction we've added, we can support more NeRF paradigms beyond the\n vanilla nerf paradigm of single-scene, fixed-images, no-learnt-latents.\n We can now support variable scenes, variable number of images, and arbitrary latents.\n\n\n Train Methods:\n setup_train: sets up for being used as train\n iter_train: will be called on __iter__() for the train iterator\n next_train: will be called on __next__() for the training iterator\n get_train_iterable: utility that gets a clean pythonic iterator for your training data\n\n Eval Methods:\n setup_eval: sets up for being used as eval\n iter_eval: will be called on __iter__() for the eval iterator\n next_eval: will be called on __next__() for the eval iterator\n get_eval_iterable: utility that gets a clean pythonic iterator for your eval data\n\n\n Attributes:\n train_count (int): the step number of our train iteration, needs to be incremented manually\n eval_count (int): the step number of our eval iteration, needs to be incremented manually\n train_dataset (Dataset): the dataset for the train dataset\n eval_dataset (Dataset): the dataset for the eval dataset\n\n Additional attributes specific to each subclass are defined in the setup_train and setup_eval\n functions.\n\n \"\"\"\n\n train_dataset: Optional[Dataset] = None\n eval_dataset: Optional[Dataset] = None\n train_sampler: Optional[DistributedSampler] = None\n eval_sampler: Optional[DistributedSampler] = None\n\n def __init__(self):\n \"\"\"Constructor for the DataManager class.\n\n Subclassed DataManagers will likely need to override this constructor.\n\n If you aren't manually calling the setup_train and setup_eval functions from an overriden\n constructor, that you call super().__init__() BEFORE you initialize any\n nn.Modules or nn.Parameters, but AFTER you've already set all the attributes you need\n for the setup functions.\"\"\"\n super().__init__()\n self.train_count = 0\n self.eval_count = 0\n if self.train_dataset and self.test_mode != \"inference\":\n self.setup_train()\n if self.eval_dataset and self.test_mode != \"inference\":\n self.setup_eval()\n\n def forward(self):\n \"\"\"Blank forward method\n\n This is an nn.Module, and so requires a forward() method normally, although in our case\n we do not need a forward() method\"\"\"\n raise NotImplementedError\n\n def iter_train(self):\n \"\"\"The __iter__ function for the train iterator.\n\n This only exists to assist the get_train_iterable function, since we need to pass\n in an __iter__ function for our trivial iterable that we are making.\"\"\"\n self.train_count = 0\n\n def iter_eval(self):\n \"\"\"The __iter__ function for the eval iterator.\n\n 
This only exists to assist the get_eval_iterable function, since we need to pass\n in an __iter__ function for our trivial iterable that we are making.\"\"\"\n self.eval_count = 0\n\n def get_train_iterable(self, length=-1) -> IterableWrapper:\n \"\"\"Gets a trivial pythonic iterator that will use the iter_train and next_train functions\n as __iter__ and __next__ methods respectively.\n\n This basically is just a little utility if you want to do something like:\n | for ray_bundle, batch in datamanager.get_train_iterable():\n | <eval code here>\n since the returned IterableWrapper is just an iterator with the __iter__ and __next__\n methods (methods bound to our DataManager instance in this case) specified in the constructor.\n \"\"\"\n return IterableWrapper(self.iter_train, self.next_train, length)\n\n def get_eval_iterable(self, length=-1) -> IterableWrapper:\n \"\"\"Gets a trivial pythonic iterator that will use the iter_eval and next_eval functions\n as __iter__ and __next__ methods respectively.\n\n This basically is just a little utility if you want to do something like:\n | for ray_bundle, batch in datamanager.get_eval_iterable():\n | <eval code here>\n since the returned IterableWrapper is just an iterator with the __iter__ and __next__\n methods (methods bound to our DataManager instance in this case) specified in the constructor.\n \"\"\"\n return IterableWrapper(self.iter_eval, self.next_eval, length)\n\n @abstractmethod\n def setup_train(self):\n \"\"\"Sets up the data manager for training.\n\n Here you will define any subclass specific object attributes from the attribute\"\"\"\n\n @abstractmethod\n def setup_eval(self):\n \"\"\"Sets up the data manager for evaluation\"\"\"\n\n @abstractmethod\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the train data manager.\n\n Args:\n step: the step number of the eval image to retrieve\n Returns:\n A tuple of the ray bundle for the image, and a dictionary of additional batch information\n such as the groudtruth image.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def next_eval(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the eval data manager.\n\n Args:\n step: the step number of the eval image to retrieve\n Returns:\n A tuple of the ray bundle for the image, and a dictionary of additional batch information\n such as the groudtruth image.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]:\n \"\"\"Retreive the next eval image.\n\n Args:\n step: the step number of the eval image to retrieve\n Returns:\n A tuple of the step number, the ray bundle for the image, and a dictionary of\n additional batch information such as the groudtruth image.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_train_rays_per_batch(self) -> int:\n \"\"\"Returns the number of rays per batch for training.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_eval_rays_per_batch(self) -> int:\n \"\"\"Returns the number of rays per batch for evaluation.\"\"\"\n raise NotImplementedError\n\n def get_datapath(self) -> Optional[Path]: # pylint:disable=no-self-use\n \"\"\"Returns the path to the data. 
This is used to determine where to save camera paths.\"\"\"\n return None\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks to be used during training.\"\"\"\n return []\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use\n \"\"\"Get the param groups for the data manager.\n\n Returns:\n A list of dictionaries containing the data manager's param groups.\n \"\"\"\n return {}"
},
{
"identifier": "DataManagerConfig",
"path": "nerfstudio/data/datamanagers/base_datamanager.py",
"snippet": "class DataManagerConfig(InstantiateConfig):\n \"\"\"Configuration for data manager instantiation; DataManager is in charge of keeping the train/eval dataparsers;\n After instantiation, data manager holds both train/eval datasets and is in charge of returning unpacked\n train/eval data at each iteration\n \"\"\"\n\n _target: Type = field(default_factory=lambda: DataManager)\n \"\"\"Target class to instantiate.\"\"\"\n data: Optional[Path] = None\n \"\"\"Source of data, may not be used by all models.\"\"\"\n camera_optimizer: Optional[CameraOptimizerConfig] = None\n \"\"\"Specifies the camera pose optimizer used during training. Helpful if poses are noisy.\"\"\""
},
{
"identifier": "VanillaDataManager",
"path": "nerfstudio/data/datamanagers/base_datamanager.py",
"snippet": "class VanillaDataManager(DataManager): # pylint: disable=abstract-method\n \"\"\"Basic stored data manager implementation.\n\n This is pretty much a port over from our old dataloading utilities, and is a little jank\n under the hood. We may clean this up a little bit under the hood with more standard dataloading\n components that can be strung together, but it can be just used as a black box for now since\n only the constructor is likely to change in the future, or maybe passing in step number to the\n next_train and next_eval functions.\n\n Args:\n config: the DataManagerConfig used to instantiate class\n \"\"\"\n\n config: VanillaDataManagerConfig\n train_dataset: InputDataset\n eval_dataset: InputDataset\n train_dataparser_outputs: DataparserOutputs\n train_pixel_sampler: Optional[PixelSampler] = None\n eval_pixel_sampler: Optional[PixelSampler] = None\n\n def __init__(\n self,\n config: VanillaDataManagerConfig,\n device: Union[torch.device, str] = \"cpu\",\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n **kwargs, # pylint: disable=unused-argument\n ):\n self.config = config\n self.device = device\n self.world_size = world_size\n self.local_rank = local_rank\n self.sampler = None\n self.test_mode = test_mode\n self.test_split = \"test\" if test_mode in [\"test\", \"inference\"] else \"val\"\n self.dataparser_config = self.config.dataparser\n if self.config.data is not None:\n self.config.dataparser.data = Path(self.config.data)\n else:\n self.config.data = self.config.dataparser.data\n self.dataparser = self.dataparser_config.setup()\n self.train_dataparser_outputs = self.dataparser.get_dataparser_outputs(split=\"train\")\n\n self.train_dataset = self.create_train_dataset()\n self.eval_dataset = self.create_eval_dataset()\n super().__init__()\n\n def create_train_dataset(self) -> InputDataset:\n \"\"\"Sets up the data loaders for training\"\"\"\n return InputDataset(\n dataparser_outputs=self.train_dataparser_outputs,\n scale_factor=self.config.camera_res_scale_factor,\n )\n\n def create_eval_dataset(self) -> InputDataset:\n \"\"\"Sets up the data loaders for evaluation\"\"\"\n return InputDataset(\n dataparser_outputs=self.dataparser.get_dataparser_outputs(split=self.test_split),\n scale_factor=self.config.camera_res_scale_factor,\n )\n\n def _get_pixel_sampler( # pylint: disable=no-self-use\n self, dataset: InputDataset, *args: Any, **kwargs: Any\n ) -> PixelSampler:\n \"\"\"Infer pixel sampler to use.\"\"\"\n if self.config.patch_size > 1:\n return PatchPixelSampler(*args, **kwargs, patch_size=self.config.patch_size)\n\n # If all images are equirectangular, use equirectangular pixel sampler\n is_equirectangular = dataset.cameras.camera_type == CameraType.EQUIRECTANGULAR.value\n if is_equirectangular.all():\n return EquirectangularPixelSampler(*args, **kwargs)\n # Otherwise, use the default pixel sampler\n if is_equirectangular.any():\n CONSOLE.print(\"[bold yellow]Warning: Some cameras are equirectangular, but using default pixel sampler.\")\n return PixelSampler(*args, **kwargs)\n\n def setup_train(self):\n \"\"\"Sets up the data loaders for training\"\"\"\n assert self.train_dataset is not None\n CONSOLE.print(\"Setting up training dataset...\")\n self.train_image_dataloader = CacheDataloader(\n self.train_dataset,\n num_images_to_sample_from=self.config.train_num_images_to_sample_from,\n num_times_to_repeat_images=self.config.train_num_times_to_repeat_images,\n device=self.device,\n 
num_workers=self.world_size * 4,\n pin_memory=True,\n collate_fn=self.config.collate_fn,\n )\n self.iter_train_image_dataloader = iter(self.train_image_dataloader)\n self.train_pixel_sampler = self._get_pixel_sampler(self.train_dataset, self.config.train_num_rays_per_batch)\n self.train_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.train_dataset.cameras.size, device=self.device\n )\n self.train_ray_generator = RayGenerator(\n self.train_dataset.cameras.to(self.device),\n self.train_camera_optimizer,\n )\n\n def setup_eval(self):\n \"\"\"Sets up the data loader for evaluation\"\"\"\n assert self.eval_dataset is not None\n CONSOLE.print(\"Setting up evaluation dataset...\")\n self.eval_image_dataloader = CacheDataloader(\n self.eval_dataset,\n num_images_to_sample_from=self.config.eval_num_images_to_sample_from,\n num_times_to_repeat_images=self.config.eval_num_times_to_repeat_images,\n device=self.device,\n num_workers=self.world_size * 4,\n pin_memory=True,\n collate_fn=self.config.collate_fn,\n )\n self.iter_eval_image_dataloader = iter(self.eval_image_dataloader)\n self.eval_pixel_sampler = self._get_pixel_sampler(self.eval_dataset, self.config.eval_num_rays_per_batch)\n self.eval_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.eval_dataset.cameras.size, device=self.device\n )\n self.eval_ray_generator = RayGenerator(\n self.eval_dataset.cameras.to(self.device),\n self.eval_camera_optimizer,\n )\n # for loading full images\n self.fixed_indices_eval_dataloader = FixedIndicesEvalDataloader(\n input_dataset=self.eval_dataset,\n device=self.device,\n num_workers=self.world_size * 4,\n )\n self.eval_dataloader = RandIndicesEvalDataloader(\n input_dataset=self.eval_dataset,\n device=self.device,\n num_workers=self.world_size * 4,\n )\n\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the train dataloader.\"\"\"\n self.train_count += 1\n image_batch = next(self.iter_train_image_dataloader)\n assert self.train_pixel_sampler is not None\n batch = self.train_pixel_sampler.sample(image_batch)\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the eval dataloader.\"\"\"\n self.eval_count += 1\n image_batch = next(self.iter_eval_image_dataloader)\n assert self.eval_pixel_sampler is not None\n batch = self.eval_pixel_sampler.sample(image_batch)\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n return ray_bundle, batch\n\n def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]:\n for camera_ray_bundle, batch in self.eval_dataloader:\n assert camera_ray_bundle.camera_indices is not None\n image_idx = int(camera_ray_bundle.camera_indices[0, 0, 0])\n return image_idx, camera_ray_bundle, batch\n raise ValueError(\"No more eval images\")\n\n def get_train_rays_per_batch(self) -> int:\n return self.config.train_num_rays_per_batch\n\n def get_eval_rays_per_batch(self) -> int:\n return self.config.eval_num_rays_per_batch\n\n def get_datapath(self) -> Path:\n return self.config.dataparser.data\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use\n \"\"\"Get the param groups for the data manager.\n Returns:\n A list of dictionaries containing the data manager's param groups.\n \"\"\"\n param_groups = {}\n\n camera_opt_params = 
list(self.train_camera_optimizer.parameters())\n if self.config.camera_optimizer.mode != \"off\":\n assert len(camera_opt_params) > 0\n param_groups[self.config.camera_optimizer.param_group] = camera_opt_params\n else:\n assert len(camera_opt_params) == 0\n\n return param_groups"
},
{
"identifier": "VanillaDataManagerConfig",
"path": "nerfstudio/data/datamanagers/base_datamanager.py",
"snippet": "class VanillaDataManagerConfig(DataManagerConfig):\n \"\"\"A basic data manager\"\"\"\n\n _target: Type = field(default_factory=lambda: VanillaDataManager)\n \"\"\"Target class to instantiate.\"\"\"\n dataparser: AnnotatedDataParserUnion = BlenderDataParserConfig()\n \"\"\"Specifies the dataparser used to unpack the data.\"\"\"\n train_num_rays_per_batch: int = 1024\n \"\"\"Number of rays per batch to use per training iteration.\"\"\"\n train_num_images_to_sample_from: int = -1\n \"\"\"Number of images to sample during training iteration.\"\"\"\n train_num_times_to_repeat_images: int = -1\n \"\"\"When not training on all images, number of iterations before picking new\n images. If -1, never pick new images.\"\"\"\n eval_num_rays_per_batch: int = 1024\n \"\"\"Number of rays per batch to use per eval iteration.\"\"\"\n eval_num_images_to_sample_from: int = -1\n \"\"\"Number of images to sample during eval iteration.\"\"\"\n eval_num_times_to_repeat_images: int = -1\n \"\"\"When not evaluating on all images, number of iterations before picking\n new images. If -1, never pick new images.\"\"\"\n eval_image_indices: Optional[Tuple[int, ...]] = (0,)\n \"\"\"Specifies the image indices to use during eval; if None, uses all.\"\"\"\n camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig()\n \"\"\"Specifies the camera pose optimizer used during training. Helpful if poses are noisy, such as for data from\n Record3D.\"\"\"\n collate_fn = staticmethod(nerfstudio_collate)\n \"\"\"Specifies the collate function to use for the train and eval dataloaders.\"\"\"\n camera_res_scale_factor: float = 1.0\n \"\"\"The scale factor for scaling spatial data such as images, mask, semantics\n along with relevant information about camera intrinsics\n \"\"\"\n patch_size: int = 1\n \"\"\"Size of patch to sample from. If >1, patch-based sampling will be used.\"\"\""
},
{
"identifier": "TrainingCallback",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)"
},
{
"identifier": "TrainingCallbackAttributes",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\""
},
{
"identifier": "Model",
"path": "nerfstudio/models/base_model.py",
"snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.render_aabb = None # the box that we want to render - should be a subset of scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. 
This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n with Timer(\"forwarding\"):\n _t1 = time.time()\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n print(f\"forwarding took {time.time() - _t1} seconds\")\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n if output_name == \"mask_val\":\n outputs[\"mask_val\"] = torch.cat(outputs_list, dim=0)\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n\n def update_to_step(self, step: int) -> None:\n \"\"\"Called when loading a model from a checkpoint. Sets any model parameters that change over\n training to the correct value, based on the training step of the checkpoint.\n\n Args:\n step: training step of the loaded checkpoint\n \"\"\""
},
{
"identifier": "ModelConfig",
"path": "nerfstudio/models/base_model.py",
"snippet": "class ModelConfig(InstantiateConfig):\n \"\"\"Configuration for model instantiation\"\"\"\n\n _target: Type = field(default_factory=lambda: Model)\n \"\"\"target class to instantiate\"\"\"\n enable_collider: bool = True\n \"\"\"Whether to create a scene collider to filter rays.\"\"\"\n collider_params: Optional[Dict[str, float]] = to_immutable_dict({\"near_plane\": 2.0, \"far_plane\": 6.0})\n \"\"\"parameters to instantiate scene collider with\"\"\"\n loss_coefficients: Dict[str, float] = to_immutable_dict({\"rgb_loss_coarse\": 1.0, \"rgb_loss_fine\": 1.0})\n \"\"\"parameters to instantiate density field with\"\"\"\n eval_num_rays_per_chunk: int = 4096\n \"\"\"specifies number of rays per chunk during eval\"\"\""
},
{
"identifier": "profiler",
"path": "nerfstudio/utils/profiler.py",
"snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:"
}
] | import typing
import torch
import torch.distributed as dist
from abc import abstractmethod
from dataclasses import dataclass, field
from time import time
from typing import Any, Dict, List, Mapping, Optional, Type, Union, cast
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
TextColumn,
TimeElapsedColumn,
)
from torch import nn
from torch.nn import Parameter
from torch.nn.parallel import DistributedDataParallel as DDP
from typing_extensions import Literal
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datamanagers.base_datamanager import (
DataManager,
DataManagerConfig,
VanillaDataManager,
VanillaDataManagerConfig,
)
from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes
from nerfstudio.models.base_model import Model, ModelConfig
from nerfstudio.utils import profiler | 8,785 | and so on.
Args:
config: configuration to instantiate pipeline
device: location to place model and data
test_mode:
'train': loads train/eval datasets into memory
'test': loads train/test dataset into memory
'inference': does not load any dataset into memory
world_size: total number of machines available
local_rank: rank of current machine
Attributes:
datamanager: The data manager that will be used
model: The model that will be used
"""
# pylint: disable=abstract-method
datamanager: DataManager
_model: Model
@property
def model(self):
"""Returns the unwrapped model if in ddp"""
return module_wrapper(self._model)
@property
def device(self):
"""Returns the device that the model is on."""
return self.model.device
def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
model_state = {
key.replace("_model.", ""): value for key, value in state_dict.items() if key.startswith("_model.")
}
pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")}
self._model.load_state_dict(model_state, strict=strict)
super().load_state_dict(pipeline_state, strict=False)
@profiler.time_function
def get_train_loss_dict(self, step: int):
"""This function gets your training loss dict. This will be responsible for
getting the next batch of data from the DataManager and interfacing with the
Model class, feeding the data to the model's forward function.
Args:
step: current iteration step to update sampler if using DDP (distributed)
"""
if self.world_size > 1 and step:
assert self.datamanager.train_sampler is not None
self.datamanager.train_sampler.set_epoch(step)
ray_bundle, batch = self.datamanager.next_train(step)
model_outputs = self.model(ray_bundle, batch)
metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
return model_outputs, loss_dict, metrics_dict
@profiler.time_function
def get_eval_loss_dict(self, step: int):
"""This function gets your evaluation loss dict. It needs to get the data
from the DataManager and feed it to the model's forward function
Args:
step: current iteration step
"""
self.eval()
if self.world_size > 1:
assert self.datamanager.eval_sampler is not None
self.datamanager.eval_sampler.set_epoch(step)
ray_bundle, batch = self.datamanager.next_eval(step)
model_outputs = self.model(ray_bundle, batch)
metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
self.train()
return model_outputs, loss_dict, metrics_dict
@abstractmethod
@profiler.time_function
def get_eval_image_metrics_and_images(self, step: int):
"""This function gets your evaluation loss dict. It needs to get the data
from the DataManager and feed it to the model's forward function
Args:
step: current iteration step
"""
@abstractmethod
@profiler.time_function
def get_average_eval_image_metrics(self, step: Optional[int] = None):
"""Iterate over all the images in the eval dataset and get the average."""
def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:
"""Load the checkpoint from the given path
Args:
loaded_state: pre-trained model state dict
step: training step of the loaded checkpoint
"""
def get_training_callbacks(
self, training_callback_attributes: TrainingCallbackAttributes
) -> List[TrainingCallback]:
"""Returns the training callbacks from both the Dataloader and the Model."""
def get_param_groups(self) -> Dict[str, List[Parameter]]:
"""Get the param groups for the pipeline.
Returns:
A list of dictionaries containing the pipeline's param groups.
"""
@dataclass
class VanillaPipelineConfig(cfg.InstantiateConfig):
"""Configuration for pipeline instantiation"""
_target: Type = field(default_factory=lambda: VanillaPipeline)
"""target class to instantiate"""
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstracts for the Pipeline class.
"""
from __future__ import annotations
def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model:
"""
If DDP, then return the .module. Otherwise, return the model.
"""
if isinstance(ddp_or_model, DDP):
return cast(Model, ddp_or_model.module)
return ddp_or_model
class Pipeline(nn.Module):
"""The intent of this class is to provide a higher level interface for the Model
that will be easy to use for our Trainer class.
This class will contain high level functions for the model like getting the loss
dictionaries and visualization code. It should have ways to get the next iterations
training loss, evaluation loss, and generate whole images for visualization. Each model
class should be 1:1 with a pipeline that can act as a standardized interface and hide
differences in how each model takes in and outputs data.
This class's function is to hide the data manager and model classes from the trainer,
worrying about:
1) Fetching data with the data manager
2) Feeding the model the data and fetching the loss
Hopefully this provides a higher level interface for the trainer to use, and
simplifying the model classes, which each may have different forward() methods
and so on.
Args:
config: configuration to instantiate pipeline
device: location to place model and data
test_mode:
'train': loads train/eval datasets into memory
'test': loads train/test dataset into memory
'inference': does not load any dataset into memory
world_size: total number of machines available
local_rank: rank of current machine
Attributes:
datamanager: The data manager that will be used
model: The model that will be used
"""
# pylint: disable=abstract-method
datamanager: DataManager
_model: Model
@property
def model(self):
"""Returns the unwrapped model if in ddp"""
return module_wrapper(self._model)
@property
def device(self):
"""Returns the device that the model is on."""
return self.model.device
def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
model_state = {
key.replace("_model.", ""): value for key, value in state_dict.items() if key.startswith("_model.")
}
pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")}
self._model.load_state_dict(model_state, strict=strict)
super().load_state_dict(pipeline_state, strict=False)
@profiler.time_function
def get_train_loss_dict(self, step: int):
"""This function gets your training loss dict. This will be responsible for
getting the next batch of data from the DataManager and interfacing with the
Model class, feeding the data to the model's forward function.
Args:
step: current iteration step to update sampler if using DDP (distributed)
"""
if self.world_size > 1 and step:
assert self.datamanager.train_sampler is not None
self.datamanager.train_sampler.set_epoch(step)
ray_bundle, batch = self.datamanager.next_train(step)
model_outputs = self.model(ray_bundle, batch)
metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
return model_outputs, loss_dict, metrics_dict
@profiler.time_function
def get_eval_loss_dict(self, step: int):
"""This function gets your evaluation loss dict. It needs to get the data
from the DataManager and feed it to the model's forward function
Args:
step: current iteration step
"""
self.eval()
if self.world_size > 1:
assert self.datamanager.eval_sampler is not None
self.datamanager.eval_sampler.set_epoch(step)
ray_bundle, batch = self.datamanager.next_eval(step)
model_outputs = self.model(ray_bundle, batch)
metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
self.train()
return model_outputs, loss_dict, metrics_dict
@abstractmethod
@profiler.time_function
def get_eval_image_metrics_and_images(self, step: int):
"""This function gets your evaluation loss dict. It needs to get the data
from the DataManager and feed it to the model's forward function
Args:
step: current iteration step
"""
@abstractmethod
@profiler.time_function
def get_average_eval_image_metrics(self, step: Optional[int] = None):
"""Iterate over all the images in the eval dataset and get the average."""
def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:
"""Load the checkpoint from the given path
Args:
loaded_state: pre-trained model state dict
step: training step of the loaded checkpoint
"""
def get_training_callbacks(
self, training_callback_attributes: TrainingCallbackAttributes
) -> List[TrainingCallback]:
"""Returns the training callbacks from both the Dataloader and the Model."""
def get_param_groups(self) -> Dict[str, List[Parameter]]:
"""Get the param groups for the pipeline.
Returns:
A list of dictionaries containing the pipeline's param groups.
"""
@dataclass
class VanillaPipelineConfig(cfg.InstantiateConfig):
"""Configuration for pipeline instantiation"""
_target: Type = field(default_factory=lambda: VanillaPipeline)
"""target class to instantiate""" | datamanager: DataManagerConfig = VanillaDataManagerConfig() | 4 | 2023-10-26 04:39:15+00:00 | 12k |
mikacuy/PL-NeRF | run_plnerf.py | [
{
"identifier": "load_llff_data",
"path": "load_llff.py",
"snippet": "def load_llff_data(basedir, factor=8, recenter=True, bd_factor=.75, spherify=False, path_zflat=False):\n \n\n poses, bds, imgs = _load_data(basedir, factor=factor) # factor=8 downsamples original imgs by 8x\n print('Loaded', basedir, bds.min(), bds.max())\n \n # Correct rotation matrix ordering and move variable dim to axis 0\n poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)\n poses = np.moveaxis(poses, -1, 0).astype(np.float32)\n imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)\n images = imgs\n bds = np.moveaxis(bds, -1, 0).astype(np.float32)\n \n # Rescale if bd_factor is provided\n sc = 1. if bd_factor is None else 1./(bds.min() * bd_factor)\n poses[:,:3,3] *= sc\n bds *= sc\n \n if recenter:\n poses = recenter_poses(poses)\n \n if spherify:\n poses, render_poses, bds = spherify_poses(poses, bds)\n\n else:\n \n c2w = poses_avg(poses)\n print('recentered', c2w.shape)\n print(c2w[:3,:4])\n\n ## Get spiral\n # Get average pose\n up = normalize(poses[:, :3, 1].sum(0))\n\n # Find a reasonable \"focus depth\" for this dataset\n close_depth, inf_depth = bds.min()*.9, bds.max()*5.\n dt = .75\n mean_dz = 1./(((1.-dt)/close_depth + dt/inf_depth))\n focal = mean_dz\n\n # Get radii for spiral path\n shrink_factor = .8\n zdelta = close_depth * .2\n tt = poses[:,:3,3] # ptstocam(poses[:3,3,:].T, c2w).T\n rads = np.percentile(np.abs(tt), 90, 0)\n c2w_path = c2w\n N_views = 120\n N_rots = 2\n if path_zflat:\n# zloc = np.percentile(tt, 10, 0)[2]\n zloc = -close_depth * .1\n c2w_path[:3,3] = c2w_path[:3,3] + zloc * c2w_path[:3,2]\n rads[2] = 0.\n N_rots = 1\n N_views/=2\n\n # Generate poses for spiral path\n render_poses = render_path_spiral(c2w_path, up, rads, focal, zdelta, zrate=.5, rots=N_rots, N=N_views)\n \n \n render_poses = np.array(render_poses).astype(np.float32)\n\n c2w = poses_avg(poses)\n print('Data:')\n print(poses.shape, images.shape, bds.shape)\n \n dists = np.sum(np.square(c2w[:3,3] - poses[:,:3,3]), -1)\n i_test = np.argmin(dists)\n print('HOLDOUT view is', i_test)\n \n images = images.astype(np.float32)\n poses = poses.astype(np.float32)\n\n return images, poses, bds, render_poses, i_test"
},
{
"identifier": "load_dtu",
"path": "load_dtu.py",
"snippet": "def load_dtu(root_dir, scene_id, num_train=42, scale_factor = 1. / 200., half_res=True, train_split = None):\n if train_split is None:\n # i_perm = np.random.RandomState(seed=0).permutation(N_VIEWS) # fix a seed so that we get that same split every time \n # i_train, i_test = i_perm[:num_train], i_perm[num_train:]\n i_test = list(range(N_VIEWS))[::8]\n i_train = [i for i in range(N_VIEWS) if i not in i_test]\n else:\n assert len(train_split) == num_train \n i_train = train_split\n i_test = [i for i in range(N_VIEWS) if i not in i_train]\n print(\"USING TRAINGING VIEWS %s and TESTING VIEWS %s\" % (i_train, i_test))\n imgs = []\n intrinsics, w2cs, c2ws, near_fars = [], [], [], [] # record proj mats between views\n if half_res :\n downSample = 0.5\n else:\n downSample = 1.0\n counts = [0]\n for vid in i_train:\n img_filename = os.path.join(root_dir, f'Rectified/scan{scene_id}_train/rect_{vid + 1:03d}_{LIGHTING_ID}_r5000.png')\n # depth_filename = os.path.join(root_dir,f'Depths/scan{scene_id}_train/depth_map_{vid:04d}.pfm')\n img = Image.open(img_filename)\n img_wh = np.round(np.array(img.size) * downSample).astype('int')\n img = img.resize(img_wh, Image.BILINEAR)\n img = np.array(img).astype(np.float32) / 255.\n imgs += [img]\n near_far, intrinsic, w2c, c2w = read_poses(root_dir, vid, scale_factor=scale_factor, downSample=downSample)\n intrinsics.append(intrinsic)\n w2cs.append(w2c)\n c2ws.append(c2w)\n near_fars.append(near_far)\n H, W = img.shape[:2]\n focal = intrinsic[0, 0]\n counts.append(len(i_train))\n for vid in i_test:\n img_filename = os.path.join(root_dir, f'Rectified/scan{scene_id}_train/rect_{vid + 1:03d}_{LIGHTING_ID}_r5000.png')\n # depth_filename = os.path.join(root_dir,f'Depths/scan{scene_id}_train/depth_map_{vid:04d}.pfm')\n img = Image.open(img_filename)\n img_wh = np.round(np.array(img.size) * downSample).astype('int')\n img = img.resize(img_wh, Image.BILINEAR)\n img = np.array(img).astype(np.float32) / 255.\n imgs += [img]\n near_far, intrinsic, w2c, c2w = read_poses(root_dir, vid, scale_factor=scale_factor, downSample=downSample)\n intrinsics.append(intrinsic)\n w2cs.append(w2c)\n c2ws.append(c2w)\n near_fars.append(near_far)\n near = min([m for m, M in near_fars]) # near plane is the min of all near planes for each view\n far = max([M for m, M in near_fars]) # far plane is the max of all far planes for each view\n counts.append(N_VIEWS)\n \n\n imgs = np.stack(imgs, axis=0).astype(np.float32)\n intrinsics = np.stack(intrinsics, axis=0).astype(np.float32)\n w2cs = np.stack(w2cs, axis=0).astype(np.float32)\n c2ws = np.stack(c2ws, axis=0).astype(np.float32)\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(2)] # train and test\n render_poses = np.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n return imgs, intrinsics, w2cs, render_poses, [H, W, focal], i_split, near, far, [i_train, i_test]"
},
{
"identifier": "load_dtu2",
"path": "load_dtu.py",
"snippet": "def load_dtu2(root_dir, scene_id, num_train=42, half_res=True, train_split = None):\n scene_dir = os.path.join(root_dir, f\"scan{scene_id}\")\n image_dirs = os.path.join(scene_dir, \"image\")\n camera_file_path = os.path.join(scene_dir, \"cameras.npz\")\n all_cam = np.load(camera_file_path)\n # Prepare to average intrinsics over images\n fx, fy, cx, cy = 0.0, 0.0, 0.0, 0.0\n all_imgs = []\n all_poses = []\n if half_res :\n downSample = 0.5\n else:\n downSample = 1.0\n for i in range(N_VIEWS):\n image_path = os.path.join(image_dirs, \"%06d.png\" % i)\n img = Image.open(image_path)\n img_wh = np.round(np.array(img.size) * downSample).astype('int')\n W, H = img_wh\n img = img.resize(img_wh, Image.BILINEAR)\n img = np.array(img).astype(np.float32) / 255.\n img = torch.tensor(img)\n \n P = all_cam[f\"world_mat_{i}\"]\n P = P[:3]\n\n K, R, t = cv2.decomposeProjectionMatrix(P)[:3]\n K = K / K[2, 2]\n\n pose = np.eye(4, dtype=np.float32)\n pose[:3, :3] = R.transpose()\n pose[:3, 3] = (t[:3] / t[3])[:, 0]\n\n scale_mtx = all_cam.get(f\"scale_mat_{i}\")\n if scale_mtx is not None:\n norm_trans = scale_mtx[:3, 3:]\n norm_scale = np.diagonal(scale_mtx[:3, :3])[..., None]\n\n pose[:3, 3:] -= norm_trans\n pose[:3, 3:] /= norm_scale\n\n fx += torch.tensor(K[0, 0]) * downSample\n fy += torch.tensor(K[1, 1]) * downSample\n cx += torch.tensor(K[0, 2]) * downSample\n cy += torch.tensor(K[1, 2]) * downSample\n pose = (_coord_trans_world @ torch.tensor(pose, dtype=torch.float32).cpu() @ _coord_trans_cam)\n all_imgs.append(img)\n all_poses.append(pose)\n \n fx /= N_VIEWS\n fy /= N_VIEWS\n cx /= N_VIEWS\n cy /= N_VIEWS\n focal = torch.tensor((fx, fy), dtype=torch.float32)\n c = torch.tensor((cx, cy), dtype=torch.float32)\n K = torch.tensor([[focal[0], 0, c[0]], [0, focal[1], c[1]], [0, 0, 1.]]).float()\n all_imgs = torch.stack(all_imgs)\n all_poses = torch.stack(all_poses)\n \n if train_split is None:\n # i_perm = np.random.RandomState(seed=0).permutation(N_VIEWS) # fix a seed so that we get that same split every time \n # i_train, i_test = i_perm[:num_train], i_perm[num_train:]\n i_test = list(range(N_VIEWS))[::8]\n i_train = [i for i in range(N_VIEWS) if i not in i_test]\n num_train = len(i_train)\n else:\n assert len(train_split) == num_train \n i_train = train_split\n i_test = [i for i in range(N_VIEWS) if i not in i_train]\n print(\"USING TRAINGING VIEWS %s and TESTING VIEWS %s\" % (i_train, i_test))\n counts = [0, num_train, N_VIEWS]\n all_imgs_out = torch.zeros_like(all_imgs)\n all_poses_out = torch.zeros_like(all_poses)\n all_imgs_out[:num_train] = all_imgs[i_train]\n all_imgs_out[num_train:] = all_imgs[i_test]\n all_poses_out[:num_train] = all_poses[i_train]\n all_poses_out[num_train:] = all_poses[i_test]\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(2)] # train and test\n render_poses = torch.stack([torch.tensor(pose_spherical(angle, -30.0, 4.0)) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n # near 0.1, far 5.0 data from pixel nerf\n return all_imgs_out, K, all_poses_out, render_poses, [H, W, focal[0]], i_split, 0.1, 5.0, [i_train, i_test]"
},
{
"identifier": "load_blender_data",
"path": "load_blender.py",
"snippet": "def load_blender_data(basedir, half_res=False, testskip=1):\n splits = ['train', 'val', 'test']\n metas = {}\n for s in splits:\n with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:\n metas[s] = json.load(fp)\n\n all_imgs = []\n all_poses = []\n counts = [0]\n for s in splits:\n meta = metas[s]\n imgs = []\n poses = []\n if s=='train' or testskip==0:\n skip = 1\n else:\n skip = testskip\n \n for frame in meta['frames'][::skip]:\n fname = os.path.join(basedir, frame['file_path'] + '.png')\n imgs.append(imageio.imread(fname))\n poses.append(np.array(frame['transform_matrix']))\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\n poses = np.array(poses).astype(np.float32)\n counts.append(counts[-1] + imgs.shape[0])\n all_imgs.append(imgs)\n all_poses.append(poses)\n \n i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]\n \n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n \n H, W = imgs[0].shape[:2]\n camera_angle_x = float(meta['camera_angle_x'])\n focal = .5 * W / np.tan(.5 * camera_angle_x)\n \n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n \n if half_res:\n H = H//2\n W = W//2\n focal = focal/2.\n\n imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))\n for i, img in enumerate(imgs):\n imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)\n imgs = imgs_half_res\n # imgs = tf.image.resize_area(imgs, [400, 400]).numpy()\n\n \n return imgs, poses, render_poses, [H, W, focal], i_split"
},
{
"identifier": "load_scene_blender_fixed_dist_new",
"path": "load_blender.py",
"snippet": "def load_scene_blender_fixed_dist_new(basedir, half_res=True, train_dist=1.0, test_dist=1.0, val_dist=1.0):\n splits = ['train', 'val', 'test']\n\n all_imgs = []\n\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n\n for s in splits:\n\n if s == \"train\":\n folder = 'radius_{}_{}'.format(str(train_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(train_dist), s)\n elif s == \"val\":\n folder = 'radius_{}_{}'.format(str(val_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(val_dist), s) \n elif s == \"test\":\n folder = 'radius_{}_{}'.format(str(test_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(test_dist), s) \n else:\n ## dummy will return not exist\n transforms_file = \"blah\"\n\n if os.path.exists(os.path.join(basedir, transforms_file)):\n\n json_fname = os.path.join(basedir, transforms_file)\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n # if 'train' in s:\n near = 2.\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n imgs = []\n poses = []\n intrinsics = []\n\n if s=='train':\n skip = 1\n elif s == \"val\":\n skip = 1\n elif s ==\"test\":\n skip = 4\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n \n return imgs, poses, render_poses, [H, W, focal], i_split"
},
{
"identifier": "load_scene_blender2",
"path": "load_blender.py",
"snippet": "def load_scene_blender2(basedir, train_json = \"transforms_train.json\", half_res=True):\n splits = ['train', 'val', 'test']\n # splits = ['test']\n\n all_imgs = []\n\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n for s in splits:\n if os.path.exists(os.path.join(basedir, '{}_transforms.json'.format(s))):\n\n json_fname = os.path.join(basedir, '{}_transforms.json'.format(s))\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n if 'train' in s:\n near = 2.\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n imgs = []\n poses = []\n intrinsics = []\n\n if s=='train':\n skip = 1\n elif s ==\"test\":\n skip = 8\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n \n return imgs, poses, render_poses, [H, W, focal], i_split"
}
] | import os, sys
import numpy as np
import imageio
import json
import random
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import configargparse
import datetime
import math
import cv2
import shutil
import configargparse
from tqdm import tqdm, trange
from torch.utils.tensorboard import SummaryWriter
from skimage.metrics import structural_similarity
from lpips import LPIPS
from run_nerf_helpers import *
from load_llff import load_llff_data
from load_dtu import load_dtu, load_dtu2
from load_blender import load_blender_data, load_scene_blender_fixed_dist_new, load_scene_blender2
from natsort import natsorted
from argparse import Namespace | 8,713 | print(f"Using {args.n_gpus} GPU(s).")
# Load data
scene_data_dir = os.path.join(args.data_dir, args.scene_id)
K = None
if args.dataset == 'llff':
images, poses, bds, render_poses, i_test = load_llff_data(scene_data_dir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
hwf = poses[0,:3,-1]
poses = poses[:,:3,:4]
print('Loaded llff', images.shape, render_poses.shape, hwf, scene_data_dir)
if not isinstance(i_test, list):
i_test = [i_test]
if args.llffhold > 0:
print('Auto LLFF holdout,', args.llffhold)
i_test = np.arange(images.shape[0])[::args.llffhold]
i_val = i_test
i_train = np.array([i for i in np.arange(int(images.shape[0])) if
(i not in i_test and i not in i_val)])
print('DEFINING BOUNDS')
if args.no_ndc:
near = np.ndarray.min(bds) * .9
far = np.ndarray.max(bds) * 1.
else:
near = 0.
far = 1.
print('NEAR FAR', near, far)
elif args.dataset == 'blender':
images, poses, render_poses, hwf, i_split = load_blender_data(scene_data_dir, args.half_res, args.testskip)
print('Loaded blender', images.shape, render_poses.shape, hwf, scene_data_dir)
i_train, i_val, i_test = i_split
# near = 2.
near = args.set_near_plane
print("Set near plane to: " + str(near))
far = 6.
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == "blender2":
images, poses, render_poses, hwf, i_split = load_scene_blender2(scene_data_dir, half_res=args.half_res)
print('Loaded blender2', images.shape, render_poses.shape, hwf, scene_data_dir)
i_train, i_val, i_test = i_split
# near = 2.
near = args.set_near_plane
print("Set near plane to: " + str(near))
far = 6.
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == "blender_fixeddist":
images, poses, render_poses, hwf, i_split = load_scene_blender_fixed_dist_new(scene_data_dir, half_res=args.half_res, train_dist=1.0, test_dist=args.test_dist)
print('Loaded blender fixed dist', images.shape, hwf, scene_data_dir)
i_train, i_val, i_test = i_split
near = args.set_near_plane
print("Set near plane to: " + str(near))
far = 6.
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == 'LINEMOD':
images, poses, render_poses, hwf, K, i_split, near, far = load_LINEMOD_data(scene_data_dir, args.half_res, args.testskip)
print(f'Loaded LINEMOD, images shape: {images.shape}, hwf: {hwf}, K: {K}')
print(f'[CHECK HERE] near: {near}, far: {far}.')
i_train, i_val, i_test = i_split
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == 'DTU':
# use the existing split
if args.dtu_split is not None:
with open(args.dtu_split, 'r') as ff:
train_split = json.load(ff)
else:
train_split = None
images, Ks, poses, render_poses, hwf, i_split, near, far, splits = load_dtu(args.data_dir, args.dtu_scene_id, num_train=args.num_train, half_res=args.half_res, train_split=train_split)
K = Ks[0]
print(f'Loaded DTU, images shape: {images.shape}, hwf: {hwf}, K: {K}')
print(f'[CHECK HERE] near: {near}, far: {far}.')
i_train, i_test = i_split
i_val = i_test
save_json = build_json_for_dtu(splits, Ks, poses, near, far)
save_split_file = os.path.join(args.ckpt_dir, args.expname, 'split.json')
with open(save_split_file, 'w') as f:
json.dump(save_json, f, indent=4)
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == 'DTU2':
# use the existing split
if args.dtu_split is not None:
with open(args.dtu_split, 'r') as ff:
train_split = json.load(ff)
else:
train_split = None
| '''
Mikaela Uy
[email protected]
PL-NeRF: novel view synthesis experiments
A piecewise linear formulation of volume rendering
'''
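# Illustrative sketch (not from this repo's implementation): the basic difference between the
# standard piecewise-constant opacity used in NeRF and a piecewise-linear (trapezoid-rule)
# opacity, which is the kind of formulation referred to in the header above. The helper and
# tensor names are hypothetical; the repo's actual rendering code may differ in details.
def _opacity_sketch(sigma, dists):
    """sigma: [N_rays, N_samples] densities; dists: [N_rays, N_samples-1] interval lengths."""
    # Piecewise constant: density is held fixed at sigma_i over interval i.
    alpha_const = 1. - torch.exp(-sigma[..., :-1] * dists)
    # Piecewise linear: density is linearly interpolated between sigma_i and sigma_{i+1}, so the
    # integral over the interval is the trapezoid (sigma_i + sigma_{i+1}) / 2 * dist_i.
    alpha_linear = 1. - torch.exp(-0.5 * (sigma[..., :-1] + sigma[..., 1:]) * dists)
    return alpha_const, alpha_linear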
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
DEBUG = False
def build_json_for_dtu(splits, intrinsics, poses, near, far):
    out_dict = {"near": near,
                "far": far}
i_train, i_test = splits
train_dicts = []
test_dicts = []
for i in i_train:
train_dict = {}
train_dict["extrinsic"] = poses[i].tolist()
train_dict["intrinsic"] = intrinsics[i].tolist()
train_dict["pose_id"] = int(i)
train_dicts.append(train_dict)
for i in i_test:
test_dict = {}
test_dict["extrinsic"] = poses[i].tolist()
test_dict["intrinsic"] = intrinsics[i].tolist()
test_dict["pose_id"] = int(i)
test_dicts.append(test_dict)
out_dict["train_frames"] = train_dicts
out_dict["test_frames"] = test_dicts
return out_dict
def batchify(fn, chunk):
"""Constructs a version of 'fn' that applies to smaller batches.
"""
if chunk is None:
return fn
def ret(inputs):
return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0)
return ret
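# Illustrative usage sketch (not part of the original script): batchify wraps a network so that a
# large flattened batch is pushed through in chunks, trading a little speed for lower peak memory.
# The toy function below is a hypothetical stand-in for the NeRF MLP.
def _batchify_demo():
    toy_fn = lambda x: x * 2.                  # stand-in for the NeRF network
    chunked_fn = batchify(toy_fn, chunk=1024)  # applies toy_fn at most 1024 rows at a time
    inputs = torch.rand(5000, 3)
    out = chunked_fn(inputs)                   # same result as toy_fn(inputs), chunked internally
    return out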
def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64):
"""Prepares inputs and applies network 'fn'.
"""
inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])
embedded = embed_fn(inputs_flat)
if viewdirs is not None:
input_dirs = viewdirs[:,None].expand(inputs.shape)
input_dirs_flat = torch.reshape(input_dirs, [-1, input_dirs.shape[-1]])
embedded_dirs = embeddirs_fn(input_dirs_flat)
embedded = torch.cat([embedded, embedded_dirs], -1)
outputs_flat = batchify(fn, netchunk)(embedded)
outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]])
return outputs
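# Illustrative sketch (not part of the original script): run_network flattens [..., 3] points,
# positionally embeds them (and optionally the per-point view directions), pushes the embeddings
# through the network in chunks via batchify, and restores the original leading shape. The toy
# embedder and network below are hypothetical stand-ins for get_embedder / NeRF.
def _run_network_demo():
    toy_embed = lambda x: torch.cat([x, torch.sin(x), torch.cos(x)], -1)  # 3 -> 9 channels
    toy_net = lambda e: e.sum(-1, keepdim=True).expand(-1, 4)             # 9 -> 4 channels
    pts = torch.rand(8, 64, 3)                                            # [N_rays, N_samples, 3]
    out = run_network(pts, viewdirs=None, fn=toy_net,
                      embed_fn=toy_embed, embeddirs_fn=None, netchunk=1024)
    return out.shape                                                      # torch.Size([8, 64, 4])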
def batchify_rays(rays_flat, chunk=1024*32, **kwargs):
"""Render rays in smaller minibatches to avoid OOM.
"""
all_ret = {}
for i in range(0, rays_flat.shape[0], chunk):
ret = render_rays(rays_flat[i:i+chunk], **kwargs)
for k in ret:
if k not in all_ret:
all_ret[k] = []
all_ret[k].append(ret[k])
all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}
return all_ret
def render(H, W, K, chunk=1024*32, rays=None, c2w=None, ndc=True,
near=0., far=1.,
use_viewdirs=False, c2w_staticcam=None,
**kwargs):
"""Render rays
Args:
H: int. Height of image in pixels.
W: int. Width of image in pixels.
      K: array of shape [3, 3]. Camera intrinsics matrix.
chunk: int. Maximum number of rays to process simultaneously. Used to
control maximum memory usage. Does not affect final results.
rays: array of shape [2, batch_size, 3]. Ray origin and direction for
each example in batch.
c2w: array of shape [3, 4]. Camera-to-world transformation matrix.
ndc: bool. If True, represent ray origin, direction in NDC coordinates.
near: float or array of shape [batch_size]. Nearest distance for a ray.
far: float or array of shape [batch_size]. Farthest distance for a ray.
use_viewdirs: bool. If True, use viewing direction of a point in space in model.
c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for
camera while using other c2w argument for viewing directions.
Returns:
rgb_map: [batch_size, 3]. Predicted RGB values for rays.
disp_map: [batch_size]. Disparity map. Inverse of depth.
acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
extras: dict with everything returned by render_rays().
"""
if c2w is not None:
# special case to render full image
rays_o, rays_d = get_rays(H, W, K, c2w)
else:
# use provided ray batch
rays_o, rays_d = rays
if use_viewdirs:
# provide ray directions as input
viewdirs = rays_d
if c2w_staticcam is not None:
# special case to visualize effect of viewdirs
rays_o, rays_d = get_rays(H, W, K, c2w_staticcam)
viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
viewdirs = torch.reshape(viewdirs, [-1,3]).float()
sh = rays_d.shape # [..., 3]
if ndc:
# for forward facing scenes
rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
# Create ray batch
rays_o = torch.reshape(rays_o, [-1,3]).float()
rays_d = torch.reshape(rays_d, [-1,3]).float()
near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1])
rays = torch.cat([rays_o, rays_d, near, far], -1)
if use_viewdirs:
rays = torch.cat([rays, viewdirs], -1)
# Render and reshape
all_ret = batchify_rays(rays, chunk, **kwargs)
for k in all_ret:
k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
all_ret[k] = torch.reshape(all_ret[k], k_sh)
k_extract = ['rgb_map', 'disp_map', 'acc_map']
ret_list = [all_ret[k] for k in k_extract]
ret_dict = {k : all_ret[k] for k in all_ret if k not in k_extract}
return ret_list + [ret_dict]
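# Illustrative usage sketch (not part of the original script): rendering one full image from a
# camera-to-world pose, mirroring how render_path and the evaluation helpers below call render().
# `render_kwargs_test` and the intrinsics K are assumed to come from the surrounding script.
def _render_full_image_demo(H, W, K, c2w, render_kwargs_test, chunk=1024 * 32):
    with torch.no_grad():
        rgb, disp, acc, extras = render(H, W, K, chunk=chunk, c2w=c2w[:3, :4], **render_kwargs_test)
    # rgb: [H, W, 3] predicted colors, disp: [H, W] inverse depth, acc: [H, W] accumulated opacity,
    # extras: dict with auxiliary outputs such as the expected depth map.
    return rgb, disp, acc, extras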
def render_path(render_poses, hwf, K, chunk, render_kwargs, gt_imgs=None, savedir=None, render_factor=0):
H, W, focal = hwf
if render_factor!=0:
# Render downsampled for speed
H = H//render_factor
W = W//render_factor
focal = focal/render_factor
rgbs = []
disps = []
t = time.time()
for i, c2w in enumerate(tqdm(render_poses)):
print(i, time.time() - t)
t = time.time()
rgb, disp, acc, _ = render(H, W, K, chunk=chunk, c2w=c2w[:3,:4], **render_kwargs)
rgbs.append(rgb.cpu().numpy())
disps.append(disp.cpu().numpy())
if i==0:
print(rgb.shape, disp.shape)
"""
if gt_imgs is not None and render_factor==0:
p = -10. * np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))
print(p)
"""
if savedir is not None:
rgb8 = to8b(rgbs[-1])
filename = os.path.join(savedir, '{:03d}.png'.format(i))
imageio.imwrite(filename, rgb8)
rgbs = np.stack(rgbs, 0)
disps = np.stack(disps, 0)
return rgbs, disps
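# Illustrative sketch (not part of the original script): the stacked frames returned by
# render_path can be written out as a video, e.g. with imageio. The output path is hypothetical.
def _save_render_path_video(rgbs, out_path="render_path.mp4", fps=30):
    imageio.mimwrite(out_path, to8b(rgbs), fps=fps, quality=8)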
def test_images_samples(count, indices, images, depths, valid_depths, poses, H, W, K, lpips_alex, args, render_kwargs_test, \
embedcam_fn=None, with_test_time_optimization=False):
far = render_kwargs_test['far']
if count is None:
# take all images in order
count = len(indices)
img_i = indices
else:
# take random images
if count > len(indices):
count = len(indices)
img_i = np.random.choice(indices, size=count, replace=False)
rgbs_res = torch.empty(count, 3, H, W)
rgbs0_res = torch.empty(count, 3, H, W)
target_rgbs_res = torch.empty(count, 3, H, W)
depths_res = torch.empty(count, 1, H, W)
depths0_res = torch.empty(count, 1, H, W)
target_depths_res = torch.empty(count, 1, H, W)
target_valid_depths_res = torch.empty(count, 1, H, W, dtype=bool)
mean_metrics = MeanTracker()
mean_depth_metrics = MeanTracker() # track separately since they are not always available
for n, img_idx in enumerate(img_i):
print("Render image {}/{}".format(n + 1, count))
target = images[img_idx]
target_depth = torch.zeros((target.shape[0], target.shape[1], 1)).to(device)
target_valid_depth = torch.zeros((target.shape[0], target.shape[1]), dtype=bool).to(device)
pose = poses[img_idx, :3,:4]
intrinsic = K
with torch.no_grad():
# rgb, _, _, extras = render(H, W, intrinsic, chunk=(args.chunk // 2), c2w=pose, **render_kwargs_test)
# print(render_kwargs_test)
rgb, _, _, extras = render(H, W, intrinsic, chunk=args.chunk, c2w=pose, **render_kwargs_test)
###
target_hypothesis_repeated = extras['depth_map'].unsqueeze(-1).repeat(1, 1, extras["pred_hyp"].shape[-1])
dists = torch.norm(extras["pred_hyp"].unsqueeze(-1) - target_hypothesis_repeated.unsqueeze(-1), p=2, dim=-1)
mask = extras['depth_map'] < 4.0
            dist_masked = dists[mask, ...]
            # Note: the sampling error below averages over all rays; dist_masked (rays with
            # expected depth < 4.0) is computed but not used in this metric.
            depth_rmse = torch.mean(dists)
if not torch.isnan(depth_rmse):
depth_metrics = {"importance_sampling_error" : depth_rmse.item()}
mean_depth_metrics.add(depth_metrics)
mean_metrics = mean_depth_metrics
result_dir = os.path.join(args.ckpt_dir, args.expname, "test_samples_error" + "_" + str(args.N_importance))
os.makedirs(result_dir, exist_ok=True)
with open(os.path.join(result_dir, 'metrics_expecteddepth.txt'), 'w') as f:
mean_metrics.print(f)
return mean_metrics
def render_images_with_metrics(count, indices, images, depths, valid_depths, poses, H, W, K, lpips_alex, args, render_kwargs_test, \
embedcam_fn=None, with_test_time_optimization=False):
far = render_kwargs_test['far']
if count is None:
# take all images in order
count = len(indices)
img_i = indices
else:
# take random images
if count > len(indices):
count = len(indices)
img_i = np.random.choice(indices, size=count, replace=False)
rgbs_res = torch.empty(count, 3, H, W)
rgbs0_res = torch.empty(count, 3, H, W)
target_rgbs_res = torch.empty(count, 3, H, W)
depths_res = torch.empty(count, 1, H, W)
depths0_res = torch.empty(count, 1, H, W)
target_depths_res = torch.empty(count, 1, H, W)
target_valid_depths_res = torch.empty(count, 1, H, W, dtype=bool)
mean_metrics = MeanTracker()
mean_depth_metrics = MeanTracker() # track separately since they are not always available
for n, img_idx in enumerate(img_i):
print("Render image {}/{}".format(n + 1, count), end="")
target = images[img_idx]
if args.dataset == "scannet":
target_depth = depths[img_idx]
target_valid_depth = valid_depths[img_idx]
else:
target_depth = torch.zeros((target.shape[0], target.shape[1], 1)).to(device)
target_valid_depth = torch.zeros((target.shape[0], target.shape[1]), dtype=bool).to(device)
pose = poses[img_idx, :3,:4]
intrinsic = K
with torch.no_grad():
# rgb, _, _, extras = render(H, W, intrinsic, chunk=(args.chunk // 2), c2w=pose, **render_kwargs_test)
# print(render_kwargs_test)
rgb, _, _, extras = render(H, W, intrinsic, chunk=args.chunk, c2w=pose, **render_kwargs_test)
# compute depth rmse
depth_rmse = compute_rmse(extras['depth_map'][target_valid_depth], target_depth[:, :, 0][target_valid_depth])
if not torch.isnan(depth_rmse):
depth_metrics = {"depth_rmse" : depth_rmse.item()}
mean_depth_metrics.add(depth_metrics)
# compute color metrics
target = torch.tensor(target).to(rgb.device)
img_loss = img2mse(rgb, target)
psnr = mse2psnr(img_loss)
print("PSNR: {}".format(psnr))
rgb = torch.clamp(rgb, 0, 1)
ssim = structural_similarity(rgb.cpu().numpy(), target.cpu().numpy(), data_range=1., channel_axis=-1)
lpips = lpips_alex(rgb.permute(2, 0, 1).unsqueeze(0), target.permute(2, 0, 1).unsqueeze(0), normalize=True)[0]
# store result
rgbs_res[n] = rgb.clamp(0., 1.).permute(2, 0, 1).cpu()
target_rgbs_res[n] = target.permute(2, 0, 1).cpu()
depths_res[n] = (extras['depth_map'] / far).unsqueeze(0).cpu()
target_depths_res[n] = (target_depth[:, :, 0] / far).unsqueeze(0).cpu()
target_valid_depths_res[n] = target_valid_depth.unsqueeze(0).cpu()
metrics = {"img_loss" : img_loss.item(), "psnr" : psnr.item(), "ssim" : ssim, "lpips" : lpips[0, 0, 0],}
if 'rgb0' in extras:
img_loss0 = img2mse(extras['rgb0'], target)
psnr0 = mse2psnr(img_loss0)
depths0_res[n] = (extras['depth0'] / far).unsqueeze(0).cpu()
rgbs0_res[n] = torch.clamp(extras['rgb0'], 0, 1).permute(2, 0, 1).cpu()
metrics.update({"img_loss0" : img_loss0.item(), "psnr0" : psnr0.item()})
mean_metrics.add(metrics)
res = { "rgbs" : rgbs_res, "target_rgbs" : target_rgbs_res, "depths" : depths_res, "target_depths" : target_depths_res, \
"target_valid_depths" : target_valid_depths_res}
if 'rgb0' in extras:
res.update({"rgbs0" : rgbs0_res, "depths0" : depths0_res,})
all_mean_metrics = MeanTracker()
all_mean_metrics.add({**mean_metrics.as_dict(), **mean_depth_metrics.as_dict()})
return all_mean_metrics, res
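# Illustrative sketch (not part of the original script): the color metrics gathered above reduce
# to standard formulas; e.g. PSNR is derived from the mean squared error between the rendered and
# ground-truth images, which is what img2mse / mse2psnr from run_nerf_helpers compute.
def _psnr_from_images(pred, gt):
    mse = torch.mean((pred - gt) ** 2)
    return -10. * torch.log10(mse)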
def write_images_with_metrics(images, mean_metrics, far, args, with_test_time_optimization=False, test_samples=False):
if not test_samples:
result_dir = os.path.join(args.ckpt_dir, args.expname, "test_images_" + str(args.mode)+ "_" + str(args.N_samples) + "_" + str(args.N_importance) + ("with_optimization_" if with_test_time_optimization else "") + args.scene_id)
else:
result_dir = os.path.join(args.ckpt_dir, args.expname, "test_images_samples" + str(args.mode)+ "_" + str(args.N_samples) + "_" + str(args.N_importance) + ("with_optimization_" if with_test_time_optimization else "") + str(args.N_samples) + "_" + str(args.N_importance) + args.scene_id)
os.makedirs(result_dir, exist_ok=True)
for n, (rgb, depth, gt_rgb) in enumerate(zip(images["rgbs"].permute(0, 2, 3, 1).cpu().numpy(), \
images["depths"].permute(0, 2, 3, 1).cpu().numpy(), images["target_rgbs"].permute(0, 2, 3, 1).cpu().numpy())):
# write rgb
cv2.imwrite(os.path.join(result_dir, str(n) + "_rgb" + ".png"), cv2.cvtColor(to8b(rgb), cv2.COLOR_RGB2BGR))
cv2.imwrite(os.path.join(result_dir, str(n) + "_gt" + ".png"), cv2.cvtColor(to8b(gt_rgb), cv2.COLOR_RGB2BGR))
# write depth
cv2.imwrite(os.path.join(result_dir, str(n) + "_d" + ".png"), to16b(depth))
with open(os.path.join(result_dir, 'metrics.txt'), 'w') as f:
mean_metrics.print(f)
mean_metrics.print()
def write_images_with_metrics_testdist(images, mean_metrics, far, args, test_dist, with_test_time_optimization=False, test_samples=False):
if not test_samples:
result_dir = os.path.join(args.ckpt_dir, args.expname, "test_images_dist" + str(test_dist) + "_" + ("with_optimization_" if with_test_time_optimization else "") + args.scene_id)
else:
result_dir = os.path.join(args.ckpt_dir, args.expname, "test_images_samples_dist" + str(test_dist) + "_" + ("with_optimization_" if with_test_time_optimization else "") + str(args.N_samples) + "_" + str(args.N_importance) + args.scene_id)
# if not test_samples:
# result_dir = os.path.join(args.ckpt_dir, args.expname, "train_images_" + ("with_optimization_" if with_test_time_optimization else "") + args.scene_id)
# else:
# result_dir = os.path.join(args.ckpt_dir, args.expname, "train_images_samples" + ("with_optimization_" if with_test_time_optimization else "") + str(args.N_samples) + "_" + str(args.N_importance) + args.scene_id)
os.makedirs(result_dir, exist_ok=True)
for n, (rgb, depth, gt_rgb) in enumerate(zip(images["rgbs"].permute(0, 2, 3, 1).cpu().numpy(), \
images["depths"].permute(0, 2, 3, 1).cpu().numpy(), images["target_rgbs"].permute(0, 2, 3, 1).cpu().numpy())):
# write rgb
# cv2.imwrite(os.path.join(result_dir, str(n) + "_rgb" + ".jpg"), cv2.cvtColor(to8b(rgb), cv2.COLOR_RGB2BGR))
cv2.imwrite(os.path.join(result_dir, str(n) + "_rgb" + ".png"), cv2.cvtColor(to8b(rgb), cv2.COLOR_RGB2BGR))
cv2.imwrite(os.path.join(result_dir, str(n) + "_gt" + ".png"), cv2.cvtColor(to8b(gt_rgb), cv2.COLOR_RGB2BGR))
# write depth
cv2.imwrite(os.path.join(result_dir, str(n) + "_d" + ".png"), to16b(depth))
with open(os.path.join(result_dir, 'metrics.txt'), 'w') as f:
mean_metrics.print(f)
mean_metrics.print()
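### --- Illustrative usage sketch (not called anywhere) ---
### Hedged example of how the two writer helpers above are typically driven.
### The `images` dict layout and the MeanTracker add/print API are taken from
### the evaluation loop earlier in this file; the Namespace fields, tensor
### sizes and metric values below are made up purely for illustration.
### Calling this would write PNGs and a metrics.txt under args.ckpt_dir.
def _example_write_metrics_usage():
    from argparse import Namespace
    dummy_args = Namespace(ckpt_dir="./logs", expname="demo", mode="linear",
                           N_samples=64, N_importance=128, scene_id="lego")
    images = {
        "rgbs": torch.rand(2, 3, 8, 8),         # predicted RGB, [N, 3, H, W]
        "depths": torch.rand(2, 1, 8, 8),       # predicted depth / far, [N, 1, H, W]
        "target_rgbs": torch.rand(2, 3, 8, 8),  # ground-truth RGB, [N, 3, H, W]
    }
    mean_metrics = MeanTracker()
    mean_metrics.add({"psnr": 30.0, "ssim": 0.95})
    # `far` is accepted for interface compatibility but not used by the writers.
    write_images_with_metrics(images, mean_metrics, far=6.0, args=dummy_args)
    write_images_with_metrics_testdist(images, mean_metrics, far=6.0,
                                       args=dummy_args, test_dist=1.0)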
def create_nerf(args):
"""Instantiate NeRF's MLP model.
"""
embed_fn, input_ch = get_embedder(args.multires, args.i_embed)
input_ch_views = 0
embeddirs_fn = None
if args.use_viewdirs:
embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed)
output_ch = 5 if args.N_importance > 0 else 4
skips = [4]
model = NeRF(D=args.netdepth, W=args.netwidth,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
coarse_grad_vars = list(model.parameters())
model_fine = None
if args.N_importance > 0:
model_fine = NeRF(D=args.netdepth_fine, W=args.netwidth_fine,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
# Note: `grad_vars` only holds the fine network's parameters; the coarse
# network is trained by its own optimizer (`optimizer_coarse`) below, and the
# script assumes N_importance > 0 so that a fine network always exists.
grad_vars = list(model_fine.parameters())
network_query_fn = lambda inputs, viewdirs, network_fn : run_network(inputs, viewdirs, network_fn,
embed_fn=embed_fn,
embeddirs_fn=embeddirs_fn,
netchunk=args.netchunk)
# Create optimizer
optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))
optimizer_coarse = torch.optim.Adam(params=coarse_grad_vars, lr=args.coarse_lrate, betas=(0.9, 0.999))
start = 0
##########################
# Load checkpoints
if args.ft_path is not None and args.ft_path!='None':
ckpts = [args.ft_path]
else:
ckpts = [os.path.join(args.ckpt_dir, args.expname, f) for f in sorted(os.listdir(os.path.join(args.ckpt_dir, args.expname))) if 'tar' in f]
print('Found ckpts', ckpts)
if len(ckpts) > 0 and not args.no_reload:
ckpt_path = ckpts[-1]
print('Reloading from', ckpt_path)
ckpt = torch.load(ckpt_path)
start = ckpt['global_step']
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
# Load model
model.load_state_dict(ckpt['network_fn_state_dict'])
if model_fine is not None:
model_fine.load_state_dict(ckpt['network_fine_state_dict'])
##########################
render_kwargs_train = {
'network_query_fn' : network_query_fn,
'perturb' : args.perturb,
'N_importance' : args.N_importance,
'network_fine' : model_fine,
'N_samples' : args.N_samples,
'network_fn' : model,
'use_viewdirs' : args.use_viewdirs,
'white_bkgd' : args.white_bkgd,
'raw_noise_std' : args.raw_noise_std,
'mode' : args.mode,
'color_mode': args.color_mode
}
# NDC only good for LLFF-style forward facing data
if args.dataset != 'llff' or args.no_ndc:
print('Not ndc!')
render_kwargs_train['ndc'] = False
render_kwargs_train['lindisp'] = args.lindisp
render_kwargs_test = {k : render_kwargs_train[k] for k in render_kwargs_train}
render_kwargs_test['perturb'] = True
render_kwargs_test['raw_noise_std'] = 0.
# return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, optimizer_coarse
return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, optimizer_coarse
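### --- Illustrative wiring sketch (not called anywhere) ---
### Hedged example of how the outputs of create_nerf() are typically consumed,
### following the usual NeRF training-script pattern; the actual wiring lives
### further down in train(). `near`/`far` are the scene bounds chosen by the
### data loader; render() picks them up from the kwargs dicts.
def _example_create_nerf_usage(args, near, far):
    render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, optimizer_coarse = create_nerf(args)
    bds_dict = {'near': near, 'far': far}
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)
    return render_kwargs_train, render_kwargs_test, start, optimizer, optimizer_coarse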
def compute_weights(raw, z_vals, rays_d, noise=0.):
raw2alpha = lambda raw, dists, act_fn=F.relu: 1.-torch.exp(-act_fn(raw)*dists)
dists = z_vals[...,1:] - z_vals[...,:-1]
dists = torch.cat([dists, torch.full_like(dists[...,:1], 1e10, device=device)], -1) # [N_rays, N_samples]
dists = dists * torch.norm(rays_d[...,None,:], dim=-1)
alpha = raw2alpha(raw[...,3] + noise, dists) # [N_rays, N_samples]
weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1), device=device), 1.-alpha + 1e-10], -1), -1)[:, :-1]
return weights
### Our reformulation to piecewise linear
def compute_weights_piecewise_linear(raw, z_vals, near, far, rays_d, noise=0., return_tau=False):
raw2expr = lambda raw, dists: torch.exp(-raw*dists)
### Concat
z_vals = torch.cat([near, z_vals, far], -1)
dists = z_vals[...,1:] - z_vals[...,:-1]
### Original code
dists = dists * torch.norm(rays_d[...,None,:], dim=-1)
tau = torch.cat([torch.ones((raw.shape[0], 1), device=device)*1e-10, raw[...,3] + noise, torch.ones((raw.shape[0], 1), device=device)*1e10], -1) ### tau(near) = 0, tau(far) = very big (will hit an opaque surface)
tau = F.relu(tau) ## Make positive from proof of DS-NeRF
interval_ave_tau = 0.5 * (tau[...,1:] + tau[...,:-1])
'''
Evaluating exp(-0.5 (tau_{i+1}+tau_i) (s_{i+1}-s_i) )
'''
expr = raw2expr(interval_ave_tau, dists) # [N_rays, N_samples+1]
### Transmittance until s_n
T = torch.cumprod(torch.cat([torch.ones((expr.shape[0], 1), device=device), expr], -1), -1) # [N_rays, N_samples+2], T(near)=1, starts off at 1
### Factor to multiply transmittance with
factor = (1 - expr)
weights = factor * T[:, :-1] # [N_rays, N_samples+1]
if return_tau:
return weights, tau, T
else:
return weights
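### --- Toy shape/consistency sketch (not called anywhere) ---
### Hedged comparison of the piecewise-constant weights above with the
### piecewise-linear reformulation: for N_samples samples the constant version
### returns N_samples weights, while the linear version returns N_samples + 1
### interval weights (near/far are concatenated as extra knots) that sum to ~1
### here because tau(far) is pinned to 1e10. All values are made up; tensors
### are created on the module-level `device`.
def _example_weight_shapes(n_rays=2, n_samples=8):
    raw = torch.rand(n_rays, n_samples, 4, device=device)
    z_vals = torch.linspace(2.5, 5.5, n_samples, device=device).expand(n_rays, n_samples)
    near = torch.full((n_rays, 1), 2., device=device)
    far = torch.full((n_rays, 1), 6., device=device)
    rays_d = torch.ones(n_rays, 3, device=device)
    w_const = compute_weights(raw, z_vals, rays_d)                            # [n_rays, n_samples]
    w_lin = compute_weights_piecewise_linear(raw, z_vals, near, far, rays_d)  # [n_rays, n_samples + 1]
    return w_const.shape, w_lin.shape, w_lin.sum(-1)  # last entry is ~1 per ray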
def raw2outputs(raw, z_vals, near, far, rays_d, mode, color_mode, raw_noise_std=0, pytest=False, white_bkgd=False, farcolorfix=False):
"""Transforms model's predictions to semantically meaningful values.
Args:
raw: [num_rays, num_samples along ray, 4]. Prediction from model.
z_vals: [num_rays, num_samples along ray]. Integration time.
rays_d: [num_rays, 3]. Direction of each ray.
Returns:
rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
disp_map: [num_rays]. Disparity map. Inverse of depth map.
acc_map: [num_rays]. Sum of weights along each ray.
weights: [num_rays, num_samples]. Weights assigned to each sampled color.
depth_map: [num_rays]. Estimated distance to object.
"""
rgb = torch.sigmoid(raw[...,:3]) # [N_rays, N_samples, 3]
noise = 0.
if raw_noise_std > 0.:
noise = torch.randn(raw[...,3].shape) * raw_noise_std
# Overwrite randomly sampled data if pytest
if pytest:
np.random.seed(0)
noise = np.random.rand(*list(raw[...,3].shape)) * raw_noise_std
noise = torch.Tensor(noise)
if mode == "linear":
weights, tau, T = compute_weights_piecewise_linear(raw, z_vals, near, far, rays_d, noise, return_tau=True)
if color_mode == "midpoint":
if farcolorfix:
rgb_concat = torch.cat([rgb[: ,0, :].unsqueeze(1), rgb, torch.zeros((rgb[:, -1].shape), device=device).unsqueeze(1)], 1)
else:
rgb_concat = torch.cat([rgb[: ,0, :].unsqueeze(1), rgb, rgb[: ,-1, :].unsqueeze(1)], 1)
rgb_mid = .5 * (rgb_concat[:, 1:, :] + rgb_concat[:, :-1, :])
rgb_map = torch.sum(weights[...,None] * rgb_mid, -2) # [N_rays, 3]
elif color_mode == "left":
rgb_concat = torch.cat([rgb[: ,0, :].unsqueeze(1), rgb], 1)
rgb_map = torch.sum(weights[...,None] * rgb_concat, -2)
else:
print("ERROR: Color mode unimplemented, please select left or midpoint.")
### Piecewise linear means take the midpoint
z_vals = torch.cat([near, z_vals, far], -1)
z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])
depth_map = torch.sum(weights * z_vals_mid, -1)
elif mode == "constant":
weights = compute_weights(raw, z_vals, rays_d, noise)
rgb_map = torch.sum(weights[...,None] * rgb, -2) # [N_rays, 3]
depth_map = torch.sum(weights * z_vals, -1)
tau = None
T = None
disp_map = 1./torch.max(1e-10 * torch.ones_like(depth_map), depth_map / torch.sum(weights, -1))
acc_map = torch.sum(weights, -1)
if white_bkgd:
rgb_map = rgb_map + (1.-acc_map[...,None])
return rgb_map, disp_map, acc_map, weights, depth_map, tau, T
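### --- Minimal call sketch (not called anywhere) ---
### Hedged illustration of raw2outputs() in both aggregation modes on made-up
### inputs. It returns (rgb_map, disp_map, acc_map, weights, depth_map, tau, T);
### in "constant" mode tau and T come back as None. Tensors live on the
### module-level `device`; all values are arbitrary.
def _example_raw2outputs(n_rays=2, n_samples=8):
    raw = torch.rand(n_rays, n_samples, 4, device=device)
    z_vals = torch.linspace(2.5, 5.5, n_samples, device=device).expand(n_rays, n_samples)
    near = torch.full((n_rays, 1), 2., device=device)
    far = torch.full((n_rays, 1), 6., device=device)
    rays_d = torch.ones(n_rays, 3, device=device)
    out_linear = raw2outputs(raw, z_vals, near, far, rays_d, mode="linear", color_mode="midpoint")
    out_constant = raw2outputs(raw, z_vals, near, far, rays_d, mode="constant", color_mode="midpoint")
    return out_linear[0].shape, out_constant[0].shape  # rgb_map is [n_rays, 3] in both modes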
def render_rays(ray_batch,
network_fn,
network_query_fn,
N_samples,
mode,
color_mode,
retraw=False,
lindisp=False,
perturb=0.,
N_importance=0,
network_fine=None,
white_bkgd=False,
raw_noise_std=0.,
verbose=False,
pytest=False,
quad_solution_v2=False,
zero_tol = 1e-4,
epsilon = 1e-3,
farcolorfix = False,
constant_init = False):
"""Volumetric rendering.
Args:
ray_batch: array of shape [batch_size, ...]. All information necessary
for sampling along a ray, including: ray origin, ray direction, min
dist, max dist, and unit-magnitude viewing direction.
network_fn: function. Model for predicting RGB and density at each point
in space.
network_query_fn: function used for passing queries to network_fn.
N_samples: int. Number of different times to sample along each ray.
retraw: bool. If True, include model's raw, unprocessed predictions.
lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
random points in time.
N_importance: int. Number of additional times to sample along each ray.
These samples are only passed to network_fine.
network_fine: "fine" network with same spec as network_fn.
white_bkgd: bool. If True, assume a white background.
raw_noise_std: float. Std dev of noise added to regularize sigma_a output.
verbose: bool. If True, print more debugging info.
Returns:
rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
disp_map: [num_rays]. Disparity map. 1 / depth.
acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.
raw: [num_rays, num_samples, 4]. Raw predictions from model.
rgb0: See rgb_map. Output for coarse model.
disp0: See disp_map. Output for coarse model.
acc0: See acc_map. Output for coarse model.
z_std: [num_rays]. Standard deviation of distances along ray for each
sample.
"""
N_rays = ray_batch.shape[0]
rays_o, rays_d = ray_batch[:,0:3], ray_batch[:,3:6] # [N_rays, 3] each
viewdirs = ray_batch[:,-3:] if ray_batch.shape[-1] > 8 else None
bounds = torch.reshape(ray_batch[...,6:8], [-1,1,2])
near, far = bounds[...,0], bounds[...,1] # [-1,1]
t_vals = torch.linspace(0., 1., steps=N_samples)
if not lindisp:
z_vals = near * (1.-t_vals) + far * (t_vals)
else:
z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
z_vals = z_vals.expand([N_rays, N_samples])
if perturb > 0.:
# get intervals between samples
mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
upper = torch.cat([mids, z_vals[...,-1:]], -1)
lower = torch.cat([z_vals[...,:1], mids], -1)
# stratified samples in those intervals
t_rand = torch.rand(z_vals.shape)
# Pytest, overwrite u with numpy's fixed random numbers
if pytest:
np.random.seed(0)
t_rand = np.random.rand(*list(z_vals.shape))
t_rand = torch.Tensor(t_rand)
z_vals = lower + (upper - lower) * t_rand
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]
### If constant init then overwrite mode for coarse model to constant first
if constant_init:
mode = "constant"
# raw = run_network(pts)
raw = network_query_fn(pts, viewdirs, network_fn)
rgb_map, disp_map, acc_map, weights, depth_map, tau, T = raw2outputs(raw, z_vals, near, far, rays_d, mode, color_mode, raw_noise_std, pytest=pytest, white_bkgd=white_bkgd, farcolorfix=farcolorfix)
if N_importance > 0:
rgb_map_0, disp_map_0, acc_map_0, depth_map_0, z_vals_0, weights_0 = rgb_map, disp_map, acc_map, depth_map, z_vals, weights
z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])
if mode == "linear":
z_samples, _, _, _ = sample_pdf_reformulation(z_vals, weights, tau, T, near, far, N_importance, det=(perturb==0.), pytest=pytest, quad_solution_v2=quad_solution_v2, zero_threshold = zero_tol, epsilon_=epsilon)
elif mode == "constant":
z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.), pytest=pytest)
z_samples = z_samples.detach()
######## Clamping in quad solution should have fixed this
z_samples = torch.clamp(z_samples, near, far)
########
z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3]
run_fn = network_fn if network_fine is None else network_fine
# raw = run_network(pts, fn=run_fn)
raw = network_query_fn(pts, viewdirs, run_fn)
rgb_map, disp_map, acc_map, weights, depth_map, tau, T = raw2outputs(raw, z_vals, near, far, rays_d, mode, color_mode, raw_noise_std, pytest=pytest, white_bkgd=white_bkgd, farcolorfix=farcolorfix)
# ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map, 'depth_map' : depth_map, 'pred_hyp' : pred_depth_hyp}
ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map, 'depth_map' : depth_map}
if retraw:
ret['raw'] = raw
if N_importance > 0:
ret['rgb0'] = rgb_map_0
ret['disp0'] = disp_map_0
ret['depth0'] = depth_map_0
ret['acc0'] = acc_map_0
ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False) # [N_rays]
for k in ret:
if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:
print(f"! [Numerical Error] {k} contains nan or inf.")
return ret
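### --- Ray-batch packing sketch (not called anywhere) ---
### Hedged illustration of the flat ray_batch layout render_rays() expects:
### [ray origin (3) | ray direction (3) | near (1) | far (1) | viewdirs (3)].
### A stand-in network_query_fn returning random raw predictions keeps the
### sketch self-contained; N_importance=0 skips the hierarchical resampling.
### Assumes the default tensor type matches the module-level `device` (as the
### training setup is expected to arrange); all values are made up.
def _example_render_rays(n_rays=4, n_samples=8):
    rays_o = torch.zeros(n_rays, 3, device=device)
    rays_d = torch.ones(n_rays, 3, device=device)
    near = torch.full((n_rays, 1), 2., device=device)
    far = torch.full((n_rays, 1), 6., device=device)
    viewdirs = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
    ray_batch = torch.cat([rays_o, rays_d, near, far, viewdirs], -1)  # [n_rays, 11]
    dummy_query_fn = lambda pts, dirs, fn: torch.rand(*pts.shape[:-1], 4, device=pts.device)
    return render_rays(ray_batch, network_fn=None, network_query_fn=dummy_query_fn,
                       N_samples=n_samples, mode="linear", color_mode="midpoint",
                       perturb=0., N_importance=0)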
def config_parser():
parser = configargparse.ArgumentParser()
parser.add_argument('--task', default="train", type=str, help='one out of: "train", "test", "video"')
parser.add_argument('--config', is_config_file=True,
help='config file path')
parser.add_argument("--expname", type=str,
help='experiment name')
parser.add_argument("--ckpt_dir", type=str, default="",
help='checkpoint directory')
parser.add_argument("--scene_id", type=str, default="lego",
help='scene identifier')
parser.add_argument("--data_dir", type=str, default="../nerf_synthetic",
help='directory containing the scenes')
parser.add_argument("--dataset", type=str, default="blender",
help='dataset used -- selects which dataloader')
# training options
parser.add_argument("--netdepth", type=int, default=8,
help='layers in network')
parser.add_argument("--netwidth", type=int, default=256,
help='channels per layer')
parser.add_argument("--netdepth_fine", type=int, default=8,
help='layers in fine network')
parser.add_argument("--netwidth_fine", type=int, default=256,
help='channels per layer in fine network')
parser.add_argument("--N_rand", type=int, default=32*32*4,
help='batch size (number of random rays per gradient step)')
parser.add_argument("--lrate", type=float, default=5e-4,
help='learning rate')
parser.add_argument("--coarse_lrate", type=float, default=5e-4,
help='learning rate')
parser.add_argument("--lrate_decay", type=int, default=250,
help='exponential learning rate decay (in 1000 steps)')
parser.add_argument("--chunk", type=int, default=1024*32,
help='number of rays processed in parallel, decrease if running out of memory')
parser.add_argument("--netchunk", type=int, default=1024*64,
help='number of pts sent through network in parallel, decrease if running out of memory')
parser.add_argument("--no_batching", action='store_true',
help='only take random rays from 1 image at a time')
parser.add_argument("--no_reload", action='store_true',
help='do not reload weights from saved ckpt')
parser.add_argument("--ft_path", type=str, default=None,
help='specific weights npy file to reload for coarse network')
# rendering options
parser.add_argument("--N_samples", type=int, default=64,
help='number of coarse samples per ray')
parser.add_argument("--N_importance", type=int, default=128,
help='number of additional fine samples per ray')
parser.add_argument("--perturb", type=float, default=1.,
help='set to 0. for no jitter, 1. for jitter')
parser.add_argument("--use_viewdirs", action='store_true',
help='use full 5D input instead of 3D')
parser.add_argument("--i_embed", type=int, default=0,
help='set 0 for default positional encoding, -1 for none')
parser.add_argument("--multires", type=int, default=10,
help='log2 of max freq for positional encoding (3D location)')
parser.add_argument("--multires_views", type=int, default=4,
help='log2 of max freq for positional encoding (2D direction)')
parser.add_argument("--raw_noise_std", type=float, default=0.,
help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
parser.add_argument("--render_only", action='store_true',
help='do not optimize, reload weights and render out render_poses path')
parser.add_argument("--render_test", action='store_true',
help='render the test set instead of render_poses path')
parser.add_argument("--render_factor", type=int, default=0,
help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
# training options
parser.add_argument("--precrop_iters", type=int, default=0,
help='number of steps to train on central crops')
parser.add_argument("--precrop_frac", type=float,
default=.5, help='fraction of img taken for central crops')
# dataset options
parser.add_argument("--testskip", type=int, default=1,
help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
## blender flags
parser.add_argument("--white_bkgd", action='store_true',
help='set to render synthetic data on a white bkgd (always use for deepvoxels)')
# parser.add_argument('--white_bkgd', default= False, type=bool)
parser.add_argument("--half_res", action='store_true',
help='load blender synthetic data at 400x400 instead of 800x800')
## llff flags
parser.add_argument("--factor", type=int, default=8,
help='downsample factor for LLFF images')
parser.add_argument("--no_ndc", action='store_true',
help='do not use normalized device coordinates (set for non-forward facing scenes)')
parser.add_argument("--lindisp", action='store_true',
help='sampling linearly in disparity rather than depth')
parser.add_argument("--spherify", action='store_true',
help='set for spherical 360 scenes')
parser.add_argument("--llffhold", type=int, default=8,
help='will take every 1/N images as LLFF test set, paper uses 8')
# logging/saving options
parser.add_argument("--num_iterations", type=int, default=500000,
help='number of iterations for training')
parser.add_argument("--i_print", type=int, default=100,
help='frequency of console printout and metric logging')
parser.add_argument("--i_img", type=int, default=600000,
help='frequency of tensorboard image logging')
parser.add_argument("--i_weights", type=int, default=100000,
help='frequency of weight ckpt saving')
parser.add_argument("--i_testset", type=int, default=500000,
help='frequency of testset saving')
parser.add_argument("--i_video", type=int, default=500000,
help='frequency of render_poses video saving')
### For PWL ###
parser.add_argument("--mode", type=str, default="constant",
help='rendering opacity aggregation mode -- whether to use piecewise constant (vanilla) or piecewise linear (reformulation).')
parser.add_argument("--color_mode", type=str, default="midpoint",
help='rendering color aggregation mode -- whether to use left bin or midpoint.')
parser.add_argument('--quad_solution_v2', default=True, type=bool)  # caution: argparse's type=bool treats any non-empty string as True
### Epsilon and zero tol in quadratic solution
parser.add_argument("--zero_tol", type=float, default=1e-4,
help='zero tol to revert to piecewise constant assumption')
parser.add_argument("--epsilon", type=float, default=1e-3,
help='epsilon value in the increasing and decreasing cases or max(x,epsilon)')
parser.add_argument('--set_near_plane', default= 2., type=float)
parser.add_argument("--constant_init", type=int, default=1000,
help='number of iterations to use constant aggregation')
parser.add_argument('--test_dist', default= 1.0, type=float)
parser.add_argument("--eval_scene_id", type=str, default="chair_rgba_fixdist_nv100_dist0.25-1.0-4_depth_sfn",
help='scene identifier for eval')
parser.add_argument("--eval_data_dir", type=str, default="../nerf_synthetic/fixed_dist_new-rgba/",
help='directory containing the scenes for eval')
### DTU flags
parser.add_argument("--dtu_scene_id", type=int, default=21,
help='scan id for DTU dataset to render')
parser.add_argument("--num_train", type=int, default=40,
help='number of training views to use (1 - 49)')
parser.add_argument("--dtu_split", type=str, default=None,
help='path to a JSON file with a pre-computed DTU train/test split to reuse')
##################
return parser
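### --- Parser usage sketch (not called anywhere) ---
### Hedged example of driving config_parser() programmatically (e.g. from a
### notebook or a unit test) instead of the command line. The flag names come
### from the parser above; the values are made up.
def _example_parse_args():
    parser = config_parser()
    args = parser.parse_args([
        '--task', 'train',
        '--expname', 'lego_pwl_demo',
        '--ckpt_dir', './logs',
        '--dataset', 'blender',
        '--scene_id', 'lego',
        '--mode', 'linear',
        '--color_mode', 'midpoint',
        '--N_samples', '64',
        '--N_importance', '128',
    ])
    return args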
def train():
parser = config_parser()
args = parser.parse_args()
print(args.white_bkgd)
if args.task == "train":
if args.expname is None:
args.expname = "{}_{}".format(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S'), args.scene_id)
args_file = os.path.join(args.ckpt_dir, args.expname, 'args.json')
os.makedirs(os.path.join(args.ckpt_dir, args.expname), exist_ok=True)
with open(args_file, 'w') as af:
json.dump(vars(args), af, indent=4)
else:
if args.expname is None:
print("Error: Specify experiment name for test or video")
exit()
tmp_task = args.task
tmp_data_dir = args.data_dir
tmp_scene_id = args.scene_id
tmp_dataset = args.dataset
tmp_test_dist = args.test_dist
tmp_ckpt_dir = args.ckpt_dir
tmp_set_near_plane = args.set_near_plane
tmp_white_bkgd = args.white_bkgd
tmp_eval_scene_id = args.eval_scene_id
tmp_eval_data_dir = args.eval_data_dir
# tmp_white_bkgd = False
tmp_test_skip = args.testskip
# tmp_mode = args.mode
# tmp_N_samples = args.N_samples
# tmp_N_importance = args.N_importance
# load nerf parameters from training
args_file = os.path.join(args.ckpt_dir, args.expname, 'args.json')
with open(args_file, 'r') as af:
args_dict = json.load(af)
args = Namespace(**args_dict)
# task and paths are not overwritten
args.task = tmp_task
args.data_dir = tmp_data_dir
args.ckpt_dir = tmp_ckpt_dir
# args.mode = tmp_mode
args.train_jsonfile = 'transforms_train.json'
args.set_near_plane = tmp_set_near_plane
# args.N_samples = tmp_N_samples
# args.N_importance = tmp_N_importance
args.dataset = tmp_dataset
args.test_dist = tmp_test_dist
args.scene_id = tmp_scene_id
args.white_bkgd = tmp_white_bkgd
args.eval_scene_id = tmp_eval_scene_id
args.eval_data_dir = tmp_eval_data_dir
args.testskip = tmp_test_skip
print('\n'.join(f'{k}={v}' for k, v in vars(args).items()))
args.n_gpus = torch.cuda.device_count()
print(f"Using {args.n_gpus} GPU(s).")
# Load data
scene_data_dir = os.path.join(args.data_dir, args.scene_id)
K = None
if args.dataset == 'llff':
images, poses, bds, render_poses, i_test = load_llff_data(scene_data_dir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
hwf = poses[0,:3,-1]
poses = poses[:,:3,:4]
print('Loaded llff', images.shape, render_poses.shape, hwf, scene_data_dir)
if not isinstance(i_test, list):
i_test = [i_test]
if args.llffhold > 0:
print('Auto LLFF holdout,', args.llffhold)
i_test = np.arange(images.shape[0])[::args.llffhold]
i_val = i_test
i_train = np.array([i for i in np.arange(int(images.shape[0])) if
(i not in i_test and i not in i_val)])
print('DEFINING BOUNDS')
if args.no_ndc:
near = np.ndarray.min(bds) * .9
far = np.ndarray.max(bds) * 1.
else:
near = 0.
far = 1.
print('NEAR FAR', near, far)
elif args.dataset == 'blender':
images, poses, render_poses, hwf, i_split = load_blender_data(scene_data_dir, args.half_res, args.testskip)
print('Loaded blender', images.shape, render_poses.shape, hwf, scene_data_dir)
i_train, i_val, i_test = i_split
# near = 2.
near = args.set_near_plane
print("Set near plane to: " + str(near))
far = 6.
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == "blender2":
images, poses, render_poses, hwf, i_split = load_scene_blender2(scene_data_dir, half_res=args.half_res)
print('Loaded blender2', images.shape, render_poses.shape, hwf, scene_data_dir)
i_train, i_val, i_test = i_split
# near = 2.
near = args.set_near_plane
print("Set near plane to: " + str(near))
far = 6.
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == "blender_fixeddist":
images, poses, render_poses, hwf, i_split = load_scene_blender_fixed_dist_new(scene_data_dir, half_res=args.half_res, train_dist=1.0, test_dist=args.test_dist)
print('Loaded blender fixed dist', images.shape, hwf, scene_data_dir)
i_train, i_val, i_test = i_split
near = args.set_near_plane
print("Set near plane to: " + str(near))
far = 6.
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == 'LINEMOD':
images, poses, render_poses, hwf, K, i_split, near, far = load_LINEMOD_data(scene_data_dir, args.half_res, args.testskip)
print(f'Loaded LINEMOD, images shape: {images.shape}, hwf: {hwf}, K: {K}')
print(f'[CHECK HERE] near: {near}, far: {far}.')
i_train, i_val, i_test = i_split
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == 'DTU':
# use the existing split
if args.dtu_split is not None:
with open(args.dtu_split, 'r') as ff:
train_split = json.load(ff)
else:
train_split = None
images, Ks, poses, render_poses, hwf, i_split, near, far, splits = load_dtu(args.data_dir, args.dtu_scene_id, num_train=args.num_train, half_res=args.half_res, train_split=train_split)
K = Ks[0]
print(f'Loaded DTU, images shape: {images.shape}, hwf: {hwf}, K: {K}')
print(f'[CHECK HERE] near: {near}, far: {far}.')
i_train, i_test = i_split
i_val = i_test
save_json = build_json_for_dtu(splits, Ks, poses, near, far)
save_split_file = os.path.join(args.ckpt_dir, args.expname, 'split.json')
with open(save_split_file, 'w') as f:
json.dump(save_json, f, indent=4)
if args.white_bkgd:
images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:])
else:
images = images[...,:3]
elif args.dataset == 'DTU2':
# use the existing split
if args.dtu_split is not None:
with open(args.dtu_split, 'r') as ff:
train_split = json.load(ff)
else:
train_split = None | images, K, poses, render_poses, hwf, i_split, near, far, splits = load_dtu2(args.data_dir, args.dtu_scene_id, num_train=args.num_train, half_res=args.half_res, train_split=train_split) | 2 | 2023-10-30 06:38:00+00:00 | 12k |
sehyunkwon/ICTC | step1/llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "step1/llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "step1/llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "step1/llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "step1/llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "step1/llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "step1/llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "step1/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "step1/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "step1/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "step1/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "step1/llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "step1/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "step1/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,385 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-10-27 05:00:14+00:00 | 12k |
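# The MPT row above ends with a lazy attention-bias build: _attn_bias() allocates a
# zero buffer of attn_bias_shape on the first call, and the row's next_line fills it
# via build_attn_bias(). Below is a minimal, self-contained sketch of that
# allocate-once / fill-once pattern; LazyBiasModule and make_bias() are hypothetical
# stand-ins for MPTModel and build_attn_bias, not the library's API.
import torch
from torch import nn


class LazyBiasModule(nn.Module):
    """Toy module that defers bias allocation until it is first requested."""

    def __init__(self, n_heads: int, max_seq_len: int, use_bias: bool = True):
        super().__init__()
        self.n_heads = n_heads
        self.max_seq_len = max_seq_len
        # None mirrors attn_bias_shape() yielding no shape when no additive bias is needed.
        self.bias_shape = (1, n_heads, 1, max_seq_len) if use_bias else None
        self.bias = None
        self._bias_initialized = False

    def make_bias(self, bias: torch.Tensor) -> torch.Tensor:
        # Stand-in for build_attn_bias: mask out odd key positions with -inf.
        mask = (torch.arange(self.max_seq_len, device=bias.device) % 2).bool()
        return bias.masked_fill(mask, float("-inf"))

    def get_bias(self, device, dtype):
        if not self._bias_initialized:
            if self.bias_shape:
                self.bias = torch.zeros(self.bias_shape, device=device, dtype=dtype)
                self.bias = self.make_bias(self.bias)
            self._bias_initialized = True
        return self.bias


if __name__ == "__main__":
    module = LazyBiasModule(n_heads=4, max_seq_len=8)
    print(module.get_bias(device="cpu", dtype=torch.float32).shape)  # torch.Size([1, 4, 1, 8])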
Trustworthy-AI-Group/TransferAttack | transferattack/model_related/dhf.py | [
{
"identifier": "dhf_inception_v3",
"path": "transferattack/model_related/dhf_networks/inception.py",
"snippet": "def dhf_inception_v3(mixup_weight_max: float, random_keep_prob: float, dhf_modules = None, weights: Optional[Inception_V3_Weights] = None, progress: bool = True, **kwargs: Any) -> Inception3:\n dhf_model = inception_v3(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = Inception_V3_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model"
},
{
"identifier": "dhf_inc_res_v2",
"path": "transferattack/model_related/dhf_networks/inc_res_v2.py",
"snippet": "def dhf_inc_res_v2(mixup_weight_max: float, random_keep_prob: float, dhf_modules = None, weights = None, progress: bool = True, **kwargs: Any):\n dhf_model = timm.create_model('inception_resnet_v2', pretrained=True)\n if dhf_modules is None:\n dhf_modules = Inc_Res_V2_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model"
},
{
"identifier": "dhf_resnet18",
"path": "transferattack/model_related/dhf_networks/resnet.py",
"snippet": "def dhf_resnet18(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet18(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet18_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model"
},
{
"identifier": "dhf_resnet50",
"path": "transferattack/model_related/dhf_networks/resnet.py",
"snippet": "def dhf_resnet50(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet50(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet50_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model"
},
{
"identifier": "dhf_resnet101",
"path": "transferattack/model_related/dhf_networks/resnet.py",
"snippet": "def dhf_resnet101(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet101(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet101_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model"
},
{
"identifier": "dhf_resnet152",
"path": "transferattack/model_related/dhf_networks/resnet.py",
"snippet": "def dhf_resnet152(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet152(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet152_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model"
},
{
"identifier": "MIFGSM",
"path": "transferattack/gradient/mifgsm.py",
"snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay"
},
{
"identifier": "NIFGSM",
"path": "transferattack/gradient/nifgsm.py",
"snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum"
},
{
"identifier": "DIM",
"path": "transferattack/input_transformation/dim.py",
"snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)"
},
{
"identifier": "TIM",
"path": "transferattack/input_transformation/tim.py",
"snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad"
},
{
"identifier": "SIM",
"path": "transferattack/input_transformation/sim.py",
"snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))"
},
{
"identifier": "Admix",
"path": "transferattack/input_transformation/admix.py",
"snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))"
},
{
"identifier": "utils",
"path": "transferattack/model_related/dhf_networks/utils.py",
"snippet": "class DHFUnit(nn.Module):\n def __init__(self, mixup_weight_max=0.2, random_keep_prob=0.9) -> None:\n def set_dhf_params(self, if_dhf: bool, update_mf: bool, dhf_indicator: Tensor):\n def forward(self, x: Tensor):\n def _forward(self, x):\n def uniform_random_like(x, minval, maxval):\ndef get_layer(model, name):\ndef set_layer(model, name, layer):\ndef convert_to_DHF_model_inplace_(model, mixup_weight_max: float, random_keep_prob: float, dhf_modules):\ndef turn_on_dhf_update_mf_setting(model: nn.Module):\ndef trun_off_dhf_update_mf_setting(model: nn.Module):\ndef turn_on_dhf_attack_setting(model: nn.Module, dhf_indicator: Tensor):\ndef preview_model(model: nn.Module):"
}
] | from torch import Tensor
from ..utils import *
from .dhf_networks.inception import dhf_inception_v3
from .dhf_networks.inc_res_v2 import dhf_inc_res_v2
from .dhf_networks.resnet import dhf_resnet18, dhf_resnet50, dhf_resnet101, dhf_resnet152
from ..gradient.mifgsm import MIFGSM
from ..gradient.nifgsm import NIFGSM
from ..input_transformation.dim import DIM
from ..input_transformation.tim import TIM
from ..input_transformation.sim import SIM
from ..input_transformation.admix import Admix
from .dhf_networks import utils | 7751 | def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = data.clone().detach().to(self.device).requires_grad_(False)
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
class DHF_SIM(SIM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = self.transform(data.clone().detach().to(self.device).requires_grad_(False))
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
| # example bash: python main.py --attack=mifgsm_dhf
support_models = {
"inc_v3": dhf_inception_v3,
"inc_res": dhf_inc_res_v2,
'resnet18': dhf_resnet18,
"resnet50": dhf_resnet50,
"resnet101": dhf_resnet101,
"resnet152": dhf_resnet152,
}
"""
Diversifying the High-level Features for better Adversarial Transferability (BMVC 2023) (https://arxiv.org/abs/2304.10136)
"""
class DHF_IFGSM(MIFGSM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
self.decay = 0.
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = data.clone().detach().to(self.device).requires_grad_(False)
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
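# Note on the dhf_indicator passed above: correct_indicator is 1.0 for samples whose
# current prediction matches the label, so dhf_indicator = 1 - correct_indicator is
# 1.0 exactly for the samples the surrogate currently misclassifies. A small numeric
# illustration follows (torch is in scope here via the `from ..utils import *` at the
# top of this module); the logits and labels are made up.
_demo_logits = torch.tensor([[2.0, 0.1], [0.1, 2.0], [0.3, 0.2]])
_demo_labels = torch.tensor([0, 0, 1])
_demo_correct = (torch.max(_demo_logits, dim=1)[1] == _demo_labels).to(torch.float32)  # tensor([1., 0., 0.])
_demo_dhf_indicator = 1 - _demo_correct  # tensor([0., 1., 1.])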
class DHF_MIFGSM(MIFGSM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = data.clone().detach().to(self.device).requires_grad_(False)
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
class DHF_NIFGSM(NIFGSM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = data.clone().detach().to(self.device).requires_grad_(False)
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
class DHF_DIM(DIM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = data.clone().detach().to(self.device).requires_grad_(False)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
self.reuse_rnds = False
mixup_input = self.transform(self.benign_images)
self.update_mixup_feature(mixup_input)
self.reuse_rnds = True
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
class DHF_TIM(TIM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = data.clone().detach().to(self.device).requires_grad_(False)
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
class DHF_SIM(SIM):
"""
DHF Attack
Arguments:
model (str): the surrogate model name for attack.
mixup_weight_max (float): the maximum mixup weight.
random_keep_prob (float): the keep probability when adjusting the feature elements.
"""
def __init__(self, model_name='inc_v3', dhf_modules=None, mixup_weight_max=0.2, random_keep_prob=0.9, *args, **kwargs):
self.dhf_moduels = dhf_modules
self.mixup_weight_max = mixup_weight_max
self.random_keep_prob = random_keep_prob
self.benign_images = None
super().__init__(model_name, *args, **kwargs)
def load_model(self, model_name):
if model_name in support_models.keys():
model = wrap_model(support_models[model_name](mixup_weight_max=self.mixup_weight_max,
random_keep_prob=self.random_keep_prob, weights='DEFAULT').eval().cuda())
else:
raise ValueError('Model {} not supported for DHF'.format(model_name))
return model
def update_mixup_feature(self, data: Tensor):
utils.turn_on_dhf_update_mf_setting(model=self.model)
_ = self.model(data)
utils.trun_off_dhf_update_mf_setting(model=self.model)
def forward(self, data: Tensor, label: Tensor, **kwargs):
self.benign_images = self.transform(data.clone().detach().to(self.device).requires_grad_(False))
self.update_mixup_feature(self.benign_images)
# return super().forward(data, label, **kwargs)
data = data.clone().detach().to(self.device)
label = label.clone().detach().to(self.device)
delta = self.init_delta(data)
# Initialize correct indicator
num_scale = 1 if not hasattr(self, "num_scale") else self.num_scale
num_scale = num_scale if not hasattr(self, "num_admix") else num_scale * self.num_admix
correct_indicator = torch.ones(size=(len(data)*num_scale,), device=self.device)
momentum = 0
for _ in range(self.epoch):
self.preprocess(correct_indicator=correct_indicator)
# Obtain the output
logits = self.get_logits(self.transform(data+delta))
# Update correct indicator
correct_indicator = (torch.max(logits.detach(), dim=1)[1] == label.repeat(num_scale)).to(torch.float32)
# Calculate the loss
loss = self.get_loss(logits, label)
# Calculate the gradients
grad = self.get_grad(loss, delta)
# Calculate the momentum
momentum = self.get_momentum(grad, momentum)
# Update adversarial perturbation
delta = self.update_delta(delta, data, momentum, self.alpha)
return delta.detach()
def preprocess(self, *args, **kwargs):
utils.turn_on_dhf_attack_setting(self.model, dhf_indicator=1-kwargs["correct_indicator"])
| class DHF_Admix(Admix): | 11 | 2023-10-31 03:43:26+00:00 | 12k |
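# Usage sketch for the DHF attack classes in the row above. Illustrative only: it
# assumes the TransferAttack repo is importable as `transferattack`, that pretrained
# weights can be downloaded, and that a CUDA device is available (the DHF model
# wrappers call .cuda()); the batch shape and labels are made up.
import torch
from transferattack.model_related.dhf import DHF_MIFGSM

attack = DHF_MIFGSM(model_name="resnet50", mixup_weight_max=0.2, random_keep_prob=0.9)
images = torch.rand(4, 3, 224, 224)    # batch of images in [0, 1]
labels = torch.randint(0, 1000, (4,))  # ImageNet-style class ids
delta = attack.forward(images, labels)               # returns the detached perturbation
adv_images = torch.clamp(images + delta.cpu(), 0, 1)
print(adv_images.shape)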
hydrogram/hydrogram | hydrogram/dispatcher.py | [
{
"identifier": "utils",
"path": "hydrogram/utils.py",
"snippet": "async def ainput(prompt: str = \"\", *, hide: bool = False):\ndef get_input_media_from_file_id(\n file_id: str, expected_file_type: FileType = None, ttl_seconds: Optional[int] = None\n) -> Union[\"raw.types.InputMediaPhoto\", \"raw.types.InputMediaDocument\"]:\nasync def parse_messages(\n client, messages: \"raw.types.messages.Messages\", replies: int = 1\n) -> list[\"types.Message\"]:\ndef parse_deleted_messages(client, update) -> list[\"types.Message\"]:\ndef pack_inline_message_id(msg_id: \"raw.base.InputBotInlineMessageID\"):\ndef unpack_inline_message_id(inline_message_id: str) -> \"raw.base.InputBotInlineMessageID\":\ndef get_raw_peer_id(peer: raw.base.Peer) -> Optional[int]:\ndef get_peer_id(peer: raw.base.Peer) -> int:\ndef get_peer_type(peer_id: int) -> str:\ndef get_channel_id(peer_id: int) -> int:\ndef btoi(b: bytes) -> int:\ndef itob(i: int) -> bytes:\ndef sha256(data: bytes) -> bytes:\ndef xor(a: bytes, b: bytes) -> bytes:\ndef compute_password_hash(\n algo: raw.types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,\n password: str,\n) -> bytes:\ndef compute_password_check(\n r: raw.types.account.Password, password: str\n) -> raw.types.InputCheckPasswordSRP:\nasync def parse_text_entities(\n client: \"hydrogram.Client\",\n text: str,\n parse_mode: enums.ParseMode,\n entities: list[\"types.MessageEntity\"],\n) -> dict[str, Union[str, list[raw.base.MessageEntity]]]:\ndef zero_datetime() -> datetime:\ndef timestamp_to_datetime(ts: Optional[int]) -> Optional[datetime]:\ndef datetime_to_timestamp(dt: Optional[datetime]) -> Optional[int]:\ndef get_reply_head_fm(\n message_thread_id: int, reply_to_message_id: int\n) -> raw.types.InputReplyToMessage:\nMIN_CHANNEL_ID = -1002147483647\nMAX_CHANNEL_ID = -1000000000000\nMIN_CHAT_ID = -2147483647\nMAX_USER_ID_OLD = 2147483647\nMAX_USER_ID = 999999999999\n B = btoi(B_bytes)\n A = pow(g, a, p)\n S = pow(g_b, a_ux, p)"
},
{
"identifier": "CallbackQueryHandler",
"path": "hydrogram/handlers/callback_query_handler.py",
"snippet": "class CallbackQueryHandler(Handler):\n \"\"\"The CallbackQuery handler class. Used to handle callback queries coming from inline buttons.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_callback_query` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new CallbackQuery arrives. It takes *(client, callback_query)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of callback queries to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n callback_query (:obj:`~hydrogram.types.CallbackQuery`):\n The received callback query.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n self.original_callback = callback\n super().__init__(self.resolve_future_or_callback, filters)\n\n def compose_data_identifier(self, query: CallbackQuery) -> Identifier:\n \"\"\"\n Composes an Identifier object from a CallbackQuery object.\n\n Parameters:\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to compose of.\n\n Returns:\n :obj:`~hydrogram.types.Identifier`: An Identifier object.\n \"\"\"\n from_user = query.from_user\n from_user_id = from_user.id if from_user else None\n from_user_username = from_user.username if from_user else None\n\n chat_id = None\n message_id = None\n\n if query.message:\n message_id = getattr(query.message, \"id\", getattr(query.message, \"message_id\", None))\n\n if query.message.chat:\n chat_id = [query.message.chat.id, query.message.chat.username]\n\n return Identifier(\n message_id=message_id,\n chat_id=chat_id,\n from_user_id=[from_user_id, from_user_username],\n inline_message_id=query.inline_message_id,\n )\n\n async def check_if_has_matching_listener(\n self, client: \"hydrogram.Client\", query: CallbackQuery\n ) -> tuple[bool, Optional[Listener]]:\n \"\"\"\n Checks if the CallbackQuery object has a matching listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to check with.\n\n Returns:\n A tuple of whether the CallbackQuery object has a matching listener and its filters does match with the\n CallbackQuery and the matching listener;\n \"\"\"\n data = self.compose_data_identifier(query)\n\n listener = client.get_listener_matching_with_data(data, ListenerTypes.CALLBACK_QUERY)\n\n listener_does_match = False\n\n if listener:\n filters = listener.filters\n if callable(filters):\n if iscoroutinefunction(filters.__call__):\n listener_does_match = await filters(client, query)\n else:\n listener_does_match = await client.loop.run_in_executor(\n None, filters, client, query\n )\n else:\n listener_does_match = True\n\n return listener_does_match, listener\n\n async def check(self, client: \"hydrogram.Client\", query: CallbackQuery) -> bool:\n \"\"\"\n Checks if the CallbackQuery object has a matching listener or handler.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to check with.\n\n Returns:\n ``bool``: A boolean indicating whether the CallbackQuery object has a matching 
listener or the handler filter matches.\n \"\"\"\n listener_does_match, listener = await self.check_if_has_matching_listener(client, query)\n\n if callable(self.filters):\n if iscoroutinefunction(self.filters.__call__):\n handler_does_match = await self.filters(client, query)\n else:\n handler_does_match = await client.loop.run_in_executor(\n None, self.filters, client, query\n )\n else:\n handler_does_match = True\n\n data = self.compose_data_identifier(query)\n\n if PyromodConfig.unallowed_click_alert:\n # matches with the current query but from any user\n permissive_identifier = Identifier(\n chat_id=data.chat_id,\n message_id=data.message_id,\n inline_message_id=data.inline_message_id,\n from_user_id=None,\n )\n\n matches = permissive_identifier.matches(data)\n\n if (\n listener\n and (matches and not listener_does_match)\n and listener.unallowed_click_alert\n ):\n alert = (\n listener.unallowed_click_alert\n if isinstance(listener.unallowed_click_alert, str)\n else PyromodConfig.unallowed_click_alert_text\n )\n await query.answer(alert)\n return False\n\n # let handler get the chance to handle if listener\n # exists but its filters doesn't match\n return listener_does_match or handler_does_match\n\n async def resolve_future_or_callback(\n self, client: \"hydrogram.Client\", query: CallbackQuery, *args\n ) -> None:\n \"\"\"\n Resolves the future or calls the callback of the listener. Will call the original handler if no listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to resolve or call with.\n\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to resolve or call with.\n\n args:\n The arguments to call the callback with.\n\n Returns:\n ``None``\n \"\"\"\n listener_does_match, listener = await self.check_if_has_matching_listener(client, query)\n\n if listener and listener_does_match:\n client.remove_listener(listener)\n\n if listener.future and not listener.future.done():\n listener.future.set_result(query)\n\n raise hydrogram.StopPropagation\n if listener.callback:\n if iscoroutinefunction(listener.callback):\n await listener.callback(client, query, *args)\n else:\n listener.callback(client, query, *args)\n\n raise hydrogram.StopPropagation\n\n raise ValueError(\"Listener must have either a future or a callback\")\n\n await self.original_callback(client, query, *args)"
},
{
"identifier": "ChatJoinRequestHandler",
"path": "hydrogram/handlers/chat_join_request_handler.py",
"snippet": "class ChatJoinRequestHandler(Handler):\n \"\"\"The ChatJoinRequest handler class. Used to handle join chat requests.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`.\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_chat_join_request` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new ChatJoinRequest event arrives. It takes\n *(client, chat_join_request)* as positional arguments (look at the section below for a detailed\n description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of updates to be passed in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the handler.\n\n chat_join_request (:obj:`~hydrogram.types.ChatJoinRequest`):\n The received chat join request.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
},
{
"identifier": "ChatMemberUpdatedHandler",
"path": "hydrogram/handlers/chat_member_updated_handler.py",
"snippet": "class ChatMemberUpdatedHandler(Handler):\n \"\"\"The ChatMemberUpdated handler class. Used to handle changes in the status of a chat member.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`.\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_chat_member_updated` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new ChatMemberUpdated event arrives. It takes\n *(client, chat_member_updated)* as positional arguments (look at the section below for a detailed\n description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of updates to be passed in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the handler.\n\n chat_member_updated (:obj:`~hydrogram.types.ChatMemberUpdated`):\n The received chat member update.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
},
{
"identifier": "ChosenInlineResultHandler",
"path": "hydrogram/handlers/chosen_inline_result_handler.py",
"snippet": "class ChosenInlineResultHandler(Handler):\n \"\"\"The ChosenInlineResultHandler handler class. Used to handle chosen inline results coming from inline queries.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_chosen_inline_result` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new chosen inline result arrives.\n It takes *(client, chosen_inline_result)* as positional arguments (look at the section below for a\n detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of chosen inline results to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n chosen_inline_result (:obj:`~hydrogram.types.ChosenInlineResult`):\n The received chosen inline result.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
},
{
"identifier": "DeletedMessagesHandler",
"path": "hydrogram/handlers/deleted_messages_handler.py",
"snippet": "class DeletedMessagesHandler(Handler):\n \"\"\"The deleted messages handler class. Used to handle deleted messages coming from any chat\n (private, group, channel). It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_deleted_messages` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when one or more messages have been deleted.\n It takes *(client, messages)* as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of messages to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n messages (List of :obj:`~hydrogram.types.Message`):\n The deleted messages, as list.\n \"\"\"\n\n def __init__(self, callback: Callable, filters: Filter = None):\n super().__init__(callback, filters)\n\n async def check(self, client: \"hydrogram.Client\", messages: list[Message]):\n # Every message should be checked, if at least one matches the filter True is returned\n # otherwise, or if the list is empty, False is returned\n for message in messages:\n if await super().check(client, message):\n return True\n return False"
},
{
"identifier": "EditedMessageHandler",
"path": "hydrogram/handlers/edited_message_handler.py",
"snippet": "class EditedMessageHandler(Handler):\n \"\"\"The EditedMessage handler class. Used to handle edited messages.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_edited_message` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new edited message arrives. It takes *(client, message)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of messages to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n edited_message (:obj:`~hydrogram.types.Message`):\n The received edited message.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
},
{
"identifier": "InlineQueryHandler",
"path": "hydrogram/handlers/inline_query_handler.py",
"snippet": "class InlineQueryHandler(Handler):\n \"\"\"The InlineQuery handler class. Used to handle inline queries.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_inline_query` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new InlineQuery arrives. It takes *(client, inline_query)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of inline queries to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the inline query handler.\n\n inline_query (:obj:`~hydrogram.types.InlineQuery`):\n The received inline query.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
},
{
"identifier": "MessageHandler",
"path": "hydrogram/handlers/message_handler.py",
"snippet": "class MessageHandler(Handler):\n \"\"\"The Message handler class. Used to handle new messages.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_message` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new Message arrives. It takes *(client, message)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of messages to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n message (:obj:`~hydrogram.types.Message`):\n The received message.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n self.original_callback = callback\n super().__init__(self.resolve_future_or_callback, filters)\n\n async def check_if_has_matching_listener(\n self, client: \"hydrogram.Client\", message: Message\n ) -> tuple[bool, Optional[Listener]]:\n \"\"\"\n Checks if the message has a matching listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n message (:obj:`~hydrogram.types.Message`):\n The Message object to check with.\n\n Returns:\n ``tuple``: A tuple of two elements, the first one is whether the message has a matching listener or not,\n the second one is the matching listener if exists.\n \"\"\"\n from_user = message.from_user\n from_user_id = from_user.id if from_user else None\n from_user_username = from_user.username if from_user else None\n\n message_id = getattr(message, \"id\", getattr(message, \"message_id\", None))\n\n data = Identifier(\n message_id=message_id,\n chat_id=[message.chat.id, message.chat.username],\n from_user_id=[from_user_id, from_user_username],\n )\n\n listener = client.get_listener_matching_with_data(data, ListenerTypes.MESSAGE)\n\n listener_does_match = False\n\n if listener:\n filters = listener.filters\n if callable(filters):\n if iscoroutinefunction(filters.__call__):\n listener_does_match = await filters(client, message)\n else:\n listener_does_match = await client.loop.run_in_executor(\n None, filters, client, message\n )\n else:\n listener_does_match = True\n\n return listener_does_match, listener\n\n async def check(self, client: \"hydrogram.Client\", message: Message) -> bool:\n \"\"\"\n Checks if the message has a matching listener or handler and its filters does match with the Message.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n message (:obj:`~hydrogram.types.Message`):\n The Message object to check with.\n\n Returns:\n ``bool``: Whether the message has a matching listener or handler and its filters does match with the Message.\n \"\"\"\n listener_does_match = (await self.check_if_has_matching_listener(client, message))[0]\n\n if callable(self.filters):\n if iscoroutinefunction(self.filters.__call__):\n handler_does_match = await self.filters(client, message)\n else:\n handler_does_match = await client.loop.run_in_executor(\n None, self.filters, client, message\n )\n else:\n handler_does_match = True\n\n # let handler get the chance to handle if listener\n # exists but its filters doesn't match\n return listener_does_match or handler_does_match\n\n async def resolve_future_or_callback(\n self, client: \"hydrogram.Client\", message: 
Message, *args\n ):\n \"\"\"\n Resolves the future or calls the callback of the listener if the message has a matching listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to resolve or call with.\n\n message (:obj:`~hydrogram.types.Message`):\n The Message object to resolve or call with.\n\n args (``tuple``):\n Arguments to call the callback with.\n \"\"\"\n listener_does_match, listener = await self.check_if_has_matching_listener(client, message)\n\n if listener and listener_does_match:\n client.remove_listener(listener)\n\n if listener.future and not listener.future.done():\n listener.future.set_result(message)\n\n raise hydrogram.StopPropagation\n if listener.callback:\n if iscoroutinefunction(listener.callback):\n await listener.callback(client, message, *args)\n else:\n listener.callback(client, message, *args)\n\n raise hydrogram.StopPropagation\n\n raise ValueError(\"Listener must have either a future or a callback\")\n\n await self.original_callback(client, message, *args)"
},
{
"identifier": "PollHandler",
"path": "hydrogram/handlers/poll_handler.py",
"snippet": "class PollHandler(Handler):\n \"\"\"The Poll handler class. Used to handle polls updates.\n\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_poll` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new poll update arrives. It takes *(client, poll)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of polls to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the poll handler.\n\n poll (:obj:`~hydrogram.types.Poll`):\n The received poll.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
},
{
"identifier": "RawUpdateHandler",
"path": "hydrogram/handlers/raw_update_handler.py",
"snippet": "class RawUpdateHandler(Handler):\n \"\"\"The Raw Update handler class. Used to handle raw updates. It is intended to be used with\n :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_raw_update` decorator.\n\n Parameters:\n callback (``Callable``):\n A function that will be called when a new update is received from the server. It takes\n *(client, update, users, chats)* as positional arguments (look at the section below for\n a detailed description).\n\n Other Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the update handler.\n\n update (``Update``):\n The received update, which can be one of the many single Updates listed in the\n :obj:`~hydrogram.raw.base.Update` base type.\n\n users (``dict``):\n Dictionary of all :obj:`~hydrogram.types.User` mentioned in the update.\n You can access extra info about the user (such as *first_name*, *last_name*, etc...) by using\n the IDs you find in the *update* argument (e.g.: *users[1768841572]*).\n\n chats (``dict``):\n Dictionary of all :obj:`~hydrogram.types.Chat` and\n :obj:`~hydrogram.raw.types.Channel` mentioned in the update.\n You can access extra info about the chat (such as *title*, *participants_count*, etc...)\n by using the IDs you find in the *update* argument (e.g.: *chats[1701277281]*).\n\n Note:\n The following Empty or Forbidden types may exist inside the *users* and *chats* dictionaries.\n They mean you have been blocked by the user or banned from the group/channel.\n\n - :obj:`~hydrogram.raw.types.UserEmpty`\n - :obj:`~hydrogram.raw.types.ChatEmpty`\n - :obj:`~hydrogram.raw.types.ChatForbidden`\n - :obj:`~hydrogram.raw.types.ChannelForbidden`\n \"\"\"\n\n def __init__(self, callback: Callable):\n super().__init__(callback)"
},
{
"identifier": "UserStatusHandler",
"path": "hydrogram/handlers/user_status_handler.py",
"snippet": "class UserStatusHandler(Handler):\n \"\"\"The UserStatus handler class. Used to handle user status updates (user going online or offline).\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`.\n\n For a nicer way to register this handler, have a look at the :meth:`~hydrogram.Client.on_user_status` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new user status update arrives. It takes *(client, user)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of users to be passed in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the user status handler.\n\n user (:obj:`~hydrogram.types.User`):\n The user containing the updated status.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)"
}
] | import asyncio
import inspect
import logging
import hydrogram
from collections import OrderedDict
from hydrogram import utils
from hydrogram.handlers import (
CallbackQueryHandler,
ChatJoinRequestHandler,
ChatMemberUpdatedHandler,
ChosenInlineResultHandler,
DeletedMessagesHandler,
EditedMessageHandler,
InlineQueryHandler,
MessageHandler,
PollHandler,
RawUpdateHandler,
UserStatusHandler,
)
from hydrogram.raw.types import (
UpdateBotCallbackQuery,
UpdateBotChatInviteRequester,
UpdateBotInlineQuery,
UpdateBotInlineSend,
UpdateChannelParticipant,
UpdateChatParticipant,
UpdateDeleteChannelMessages,
UpdateDeleteMessages,
UpdateEditChannelMessage,
UpdateEditMessage,
UpdateInlineBotCallbackQuery,
UpdateMessagePoll,
UpdateNewChannelMessage,
UpdateNewMessage,
UpdateNewScheduledMessage,
UpdateUserStatus,
) | 7,219 | # Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
log = logging.getLogger(__name__)
class Dispatcher:
NEW_MESSAGE_UPDATES = (
UpdateNewMessage,
UpdateNewChannelMessage,
UpdateNewScheduledMessage,
)
EDIT_MESSAGE_UPDATES = (UpdateEditMessage, UpdateEditChannelMessage)
DELETE_MESSAGES_UPDATES = (UpdateDeleteMessages, UpdateDeleteChannelMessages)
CALLBACK_QUERY_UPDATES = (UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery)
CHAT_MEMBER_UPDATES = (UpdateChatParticipant, UpdateChannelParticipant)
USER_STATUS_UPDATES = (UpdateUserStatus,)
BOT_INLINE_QUERY_UPDATES = (UpdateBotInlineQuery,)
POLL_UPDATES = (UpdateMessagePoll,)
CHOSEN_INLINE_RESULT_UPDATES = (UpdateBotInlineSend,)
CHAT_JOIN_REQUEST_UPDATES = (UpdateBotChatInviteRequester,)
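# Each tuple above groups the raw MTProto update types that are routed to the same
# high-level handler category (new messages, edits, deletions, callbacks, ...).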
def __init__(self, client: "hydrogram.Client"):
self.client = client
self.loop = asyncio.get_event_loop()
self.handler_worker_tasks = []
self.locks_list = []
self.updates_queue = asyncio.Queue()
self.groups = OrderedDict()
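# Each nested coroutine below converts a raw update into its parsed high-level
# hydrogram type and returns it together with the handler class that consumes it.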
async def message_parser(update, users, chats):
return (
await hydrogram.types.Message._parse(
client=self.client,
message=update.message,
users=users,
chats=chats,
is_scheduled=isinstance(update, UpdateNewScheduledMessage),
),
MessageHandler,
)
async def edited_message_parser(update, users, chats):
# Edited messages are parsed the same way as new messages, but the handler is different
parsed, _ = await message_parser(update, users, chats)
return (parsed, EditedMessageHandler)
async def deleted_messages_parser(update, users, chats):
return (
utils.parse_deleted_messages(self.client, update),
DeletedMessagesHandler,
)
async def callback_query_parser(update, users, chats):
return (
await hydrogram.types.CallbackQuery._parse(self.client, update, users),
CallbackQueryHandler,
)
async def user_status_parser(update, users, chats):
return (
hydrogram.types.User._parse_user_status(self.client, update),
UserStatusHandler,
)
async def inline_query_parser(update, users, chats):
return (
hydrogram.types.InlineQuery._parse(self.client, update, users),
InlineQueryHandler,
)
async def poll_parser(update, users, chats):
return (
hydrogram.types.Poll._parse_update(self.client, update),
PollHandler,
)
async def chosen_inline_result_parser(update, users, chats):
return (
hydrogram.types.ChosenInlineResult._parse(self.client, update, users),
ChosenInlineResultHandler,
)
async def chat_member_updated_parser(update, users, chats):
return (
hydrogram.types.ChatMemberUpdated._parse(self.client, update, users, chats),
ChatMemberUpdatedHandler,
)
async def chat_join_request_parser(update, users, chats):
return (
hydrogram.types.ChatJoinRequest._parse(self.client, update, users, chats),
| # Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
log = logging.getLogger(__name__)
class Dispatcher:
NEW_MESSAGE_UPDATES = (
UpdateNewMessage,
UpdateNewChannelMessage,
UpdateNewScheduledMessage,
)
EDIT_MESSAGE_UPDATES = (UpdateEditMessage, UpdateEditChannelMessage)
DELETE_MESSAGES_UPDATES = (UpdateDeleteMessages, UpdateDeleteChannelMessages)
CALLBACK_QUERY_UPDATES = (UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery)
CHAT_MEMBER_UPDATES = (UpdateChatParticipant, UpdateChannelParticipant)
USER_STATUS_UPDATES = (UpdateUserStatus,)
BOT_INLINE_QUERY_UPDATES = (UpdateBotInlineQuery,)
POLL_UPDATES = (UpdateMessagePoll,)
CHOSEN_INLINE_RESULT_UPDATES = (UpdateBotInlineSend,)
CHAT_JOIN_REQUEST_UPDATES = (UpdateBotChatInviteRequester,)
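# Each tuple above groups the raw MTProto update types that are routed to the same
# high-level handler category (new messages, edits, deletions, callbacks, ...).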
def __init__(self, client: "hydrogram.Client"):
self.client = client
self.loop = asyncio.get_event_loop()
self.handler_worker_tasks = []
self.locks_list = []
self.updates_queue = asyncio.Queue()
self.groups = OrderedDict()
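# Each nested coroutine below converts a raw update into its parsed high-level
# hydrogram type and returns it together with the handler class that consumes it.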
async def message_parser(update, users, chats):
return (
await hydrogram.types.Message._parse(
client=self.client,
message=update.message,
users=users,
chats=chats,
is_scheduled=isinstance(update, UpdateNewScheduledMessage),
),
MessageHandler,
)
async def edited_message_parser(update, users, chats):
# Edited messages are parsed the same way as new messages, but the handler is different
parsed, _ = await message_parser(update, users, chats)
return (parsed, EditedMessageHandler)
async def deleted_messages_parser(update, users, chats):
return (
utils.parse_deleted_messages(self.client, update),
DeletedMessagesHandler,
)
async def callback_query_parser(update, users, chats):
return (
await hydrogram.types.CallbackQuery._parse(self.client, update, users),
CallbackQueryHandler,
)
async def user_status_parser(update, users, chats):
return (
hydrogram.types.User._parse_user_status(self.client, update),
UserStatusHandler,
)
async def inline_query_parser(update, users, chats):
return (
hydrogram.types.InlineQuery._parse(self.client, update, users),
InlineQueryHandler,
)
async def poll_parser(update, users, chats):
return (
hydrogram.types.Poll._parse_update(self.client, update),
PollHandler,
)
async def chosen_inline_result_parser(update, users, chats):
return (
hydrogram.types.ChosenInlineResult._parse(self.client, update, users),
ChosenInlineResultHandler,
)
async def chat_member_updated_parser(update, users, chats):
return (
hydrogram.types.ChatMemberUpdated._parse(self.client, update, users, chats),
ChatMemberUpdatedHandler,
)
async def chat_join_request_parser(update, users, chats):
return (
hydrogram.types.ChatJoinRequest._parse(self.client, update, users, chats), | ChatJoinRequestHandler, | 2 | 2023-10-29 16:16:37+00:00 | 12k |
chenruduan/OAReactDiff | oa_reactdiff/tests/dynamics/test_egnn_dynamics.py | [
{
"identifier": "LEFTNet",
"path": "oa_reactdiff/model/leftnet.py",
"snippet": "class LEFTNet(torch.nn.Module):\n r\"\"\"\n LEFTNet\n\n Args:\n pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. (default: :obj:`False`)\n cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`)\n num_layers (int, optional): Number of building blocks. (default: :obj:`4`)\n hidden_channels (int, optional): Hidden embedding size. (default: :obj:`128`)\n num_radial (int, optional): Number of radial basis functions. (default: :obj:`96`)\n y_mean (float, optional): Mean value of the labels of training data. (default: :obj:`0`)\n y_std (float, optional): Standard deviation of the labels of training data. (default: :obj:`1`)\n\n \"\"\"\n\n def __init__(\n self,\n pos_require_grad=False,\n cutoff=10.0,\n num_layers=4,\n hidden_channels=128,\n num_radial=96,\n in_hidden_channels: int = 8,\n reflect_equiv: bool = True,\n legacy: bool = True,\n update: bool = True,\n pos_grad: bool = False,\n single_layer_output: bool = True,\n for_conf: bool = False,\n ff: bool = False,\n object_aware: bool = True,\n **kwargs,\n ):\n super(LEFTNet, self).__init__()\n self.num_layers = num_layers\n self.hidden_channels = hidden_channels\n self.cutoff = cutoff\n self.pos_require_grad = pos_require_grad\n self.reflect_equiv = reflect_equiv\n self.legacy = legacy\n self.update = update\n self.pos_grad = pos_grad\n self.for_conf = for_conf\n self.ff = ff\n self.object_aware = object_aware\n\n self.embedding = nn.Linear(in_hidden_channels, hidden_channels)\n self.embedding_out = nn.Linear(hidden_channels, in_hidden_channels)\n self.radial_emb = RBFEmb(num_radial, self.cutoff)\n self.neighbor_emb = NeighborEmb(hidden_channels, in_hidden_channels)\n self.s2v = CFConvS2V(hidden_channels)\n\n self.radial_lin = nn.Sequential(\n nn.Linear(num_radial, hidden_channels),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels, hidden_channels),\n )\n\n self.lin3 = nn.Sequential(\n nn.Linear(3, hidden_channels // 4),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 4, 1),\n )\n self.pos_expansion = MLP(\n in_dim=3,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n last_layer_no_activation=True,\n bias=False,\n )\n if self.legacy:\n self.distance_embedding = MLP(\n in_dim=num_radial,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n bias=False,\n )\n if self.pos_grad:\n self.dynamic_mlp_modules = nn.Sequential(\n nn.Linear(hidden_channels, hidden_channels // 2),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 2, 3),\n )\n\n self.gcl_layers = nn.ModuleList()\n self.message_layers = nn.ModuleList()\n self.update_layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.gcl_layers.append(\n GCLMessage(hidden_channels, num_radial, legacy=legacy)\n )\n self.message_layers.append(\n EquiMessage(hidden_channels, num_radial, reflect_equiv).jittable()\n )\n self.update_layers.append(EquiUpdate(hidden_channels, reflect_equiv))\n\n self.last_layer = nn.Linear(hidden_channels, 1)\n\n self.inv_sqrt_2 = 1 / math.sqrt(2.0)\n self.out_pos = EquiOutput(\n hidden_channels,\n out_channels=1,\n single_layer_output=single_layer_output,\n )\n\n # for node-wise frame\n self.vec = vector()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.radial_emb.reset_parameters()\n\n def scalarization(self, pos, edge_index):\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n coord_diff = pos[i] - pos[j]\n radial 
= torch.sum((coord_diff) ** 2, 1).unsqueeze(1)\n coord_cross = torch.cross(pos[i], pos[j])\n norm = torch.sqrt(radial) + EPS\n coord_diff = coord_diff / norm\n cross_norm = (torch.sqrt(torch.sum((coord_cross) ** 2, 1).unsqueeze(1))) + EPS\n coord_cross = coord_cross / cross_norm\n coord_vertical = torch.cross(coord_diff, coord_cross)\n\n return dist, coord_diff, coord_cross, coord_vertical\n\n @staticmethod\n def assemble_nodemask(edge_index: Tensor, pos: Tensor):\n node_mask = torch.zeros(pos.size(0), device=pos.device)\n node_mask[:] = -1\n _i, _j = edge_index\n _ind = 0\n for center in range(pos.size(0)):\n if node_mask[center] > -1:\n continue\n _connected = _j[torch.where(_i == center)]\n _connected = torch.concat(\n [_connected, torch.tensor([center], device=pos.device)]\n )\n node_mask[_connected] = _ind\n _ind += 1\n return node_mask\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ):\n # if self.pos_require_grad:\n # pos.requires_grad_()\n\n if not self.object_aware:\n subgraph_mask = None\n\n i, j = edge_index\n\n # embed z, assuming last column is atom number\n z_emb = self.embedding(h)\n\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n inner_subgraph_mask = torch.zeros(edge_index.size(1), 1, device=dist.device)\n inner_subgraph_mask[torch.where(dist < self.cutoff)[0]] = 1\n\n all_edge_masks = inner_subgraph_mask\n if subgraph_mask is not None:\n all_edge_masks = all_edge_masks * subgraph_mask\n\n edge_index_w_cutoff = edge_index.T[torch.where(all_edge_masks > 0)[0]].T\n node_mask_w_cutoff = self.assemble_nodemask(\n edge_index=edge_index_w_cutoff, pos=pos\n )\n\n pos_frame = pos.clone()\n pos_frame = remove_mean_batch(pos_frame, node_mask_w_cutoff.long())\n\n # bulid edge-wise frame and scalarization vector features for edge update\n dist, coord_diff, coord_cross, coord_vertical = self.scalarization(\n pos_frame, edge_index\n )\n\n dist = dist * all_edge_masks.squeeze(-1)\n coord_diff = coord_diff * all_edge_masks\n coord_cross = coord_cross * all_edge_masks\n coord_vertical = coord_vertical * all_edge_masks\n\n frame = torch.cat(\n (\n coord_diff.unsqueeze(-1),\n coord_cross.unsqueeze(-1),\n coord_vertical.unsqueeze(-1),\n ),\n dim=-1,\n )\n radial_emb = self.radial_emb(dist)\n radial_emb = radial_emb * all_edge_masks\n\n f = self.radial_lin(radial_emb)\n rbounds = 0.5 * (torch.cos(dist * pi / self.cutoff) + 1.0)\n f = rbounds.unsqueeze(-1) * f\n\n # init node features\n s = self.neighbor_emb(h, z_emb, edge_index, f)\n\n NE1 = self.s2v(s, coord_diff.unsqueeze(-1), edge_index, f)\n scalrization1 = torch.sum(NE1[i].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n scalrization2 = torch.sum(NE1[j].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n if self.reflect_equiv:\n scalrization1[:, 1, :] = torch.abs(scalrization1[:, 1, :].clone())\n scalrization2[:, 1, :] = torch.abs(scalrization2[:, 1, :].clone())\n\n scalar3 = (\n self.lin3(torch.permute(scalrization1, (0, 2, 1)))\n + torch.permute(scalrization1, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n scalar4 = (\n self.lin3(torch.permute(scalrization2, (0, 2, 1)))\n + torch.permute(scalrization2, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n edgeweight = torch.cat((scalar3, scalar4), dim=-1) * rbounds.unsqueeze(-1)\n edgeweight = torch.cat((edgeweight, f), dim=-1)\n # add 
distance embedding\n edgeweight = torch.cat((edgeweight, radial_emb), dim=-1)\n\n # bulid node-wise frame for node-update\n a = pos_frame\n if self.legacy:\n b = self.vec(pos_frame, edge_index)\n else:\n # Added by Chenru: for new implementation of constructing node frame.\n eff_edge_ij = torch.where(all_edge_masks.squeeze(-1) == 1)[0]\n eff_edge_index = edge_index[:, eff_edge_ij]\n eff_dist = dist[eff_edge_ij]\n b = nn_vector(eff_dist, eff_edge_index, pos_frame)\n # assert_rot_equiv(nn_vector, dist_pad, edge_index, pos) # for debugging\n\n x1 = (a - b) / ((torch.sqrt(torch.sum((a - b) ** 2, 1).unsqueeze(1))) + EPS)\n y1 = torch.cross(a, b)\n normy = (torch.sqrt(torch.sum(y1**2, 1).unsqueeze(1))) + EPS\n y1 = y1 / normy\n # assert torch.trace(torch.matmul(x1, torch.transpose(y1, 0, 1))) < EPS # for debugging\n\n z1 = torch.cross(x1, y1)\n nodeframe = torch.cat(\n (x1.unsqueeze(-1), y1.unsqueeze(-1), z1.unsqueeze(-1)), dim=-1\n )\n\n pos_prjt = torch.sum(pos_frame.unsqueeze(-1) * nodeframe, dim=1)\n\n vec = torch.zeros(s.size(0), 3, s.size(1), device=s.device)\n gradient = torch.zeros(s.size(0), 3, device=s.device)\n for i in range(self.num_layers):\n # Added by Chenru: for letting multiple objects message passing.\n if self.legacy or i == 0:\n s = s + self.pos_expansion(pos_prjt)\n s, edgeweight = self.gcl_layers[i](\n s,\n edge_index,\n edgeweight,\n )\n\n dx, dvec = self.message_layers[i](\n s,\n vec,\n edge_index,\n radial_emb,\n edgeweight,\n coord_diff,\n coord_cross,\n )\n s = s + dx\n vec = vec + dvec\n s = s * self.inv_sqrt_2\n\n if self.update:\n dx, dvec = self.update_layers[i](s, vec, nodeframe)\n s = s + dx\n vec = vec + dvec\n\n if self.pos_grad:\n dynamic_coff = self.dynamic_mlp_modules(s) # (node, 3)\n basis_mix = (\n dynamic_coff[:, :1] * x1\n + dynamic_coff[:, 1:2] * y1\n + dynamic_coff[:, 2:3] * z1\n )\n gradient = gradient + basis_mix / self.num_layers\n\n if self.for_conf:\n return s\n\n _, dpos = self.out_pos(s, vec)\n\n if update_coords_mask is not None:\n dpos = update_coords_mask * dpos\n pos = pos + dpos + gradient\n\n if self.ff:\n return s, dpos\n\n h = self.embedding_out(s)\n if node_mask is not None:\n h = h * node_mask\n edge_attr = None\n return h, pos, edge_attr"
},
{
"identifier": "EGNNDynamics",
"path": "oa_reactdiff/dynamics/egnn_dynamics.py",
"snippet": "class EGNNDynamics(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ) -> None:\n r\"\"\"Base dynamics class set up for denoising process.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n def forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tuple[List[Tensor], Tensor]:\n r\"\"\"predict noise /mu.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. 
Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tuple[List[Tensor], Tensor]: updated pos-h and edge attributes\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n h_final, pos_final, edge_attr_final = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n )\n vel = pos_final - pos\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in pos, resetting EGNN output to randn.\")\n vel = torch.randn_like(vel)\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in h, resetting EGNN output to randn.\")\n h_final = torch.randn_like(h_final)\n\n h_final = h_final[:, :-condition_dim]\n\n frag_index = self.compute_frag_index(n_frag_switch)\n xh_final = [\n torch.cat(\n [\n self.remove_mean_batch(\n vel[frag_index[ii] : frag_index[ii + 1]],\n combined_mask[frag_index[ii] : frag_index[ii + 1]],\n ),\n self.decoders[ii](h_final[frag_index[ii] : frag_index[ii + 1]]),\n ],\n dim=-1,\n )\n for ii, name in enumerate(self.fragment_names)\n ]\n\n # xh_final = self.enpose_pbc(xh_final)\n\n if edge_attr_final is None or edge_attr_final.size(1) <= max(1, self.dist_dim):\n edge_attr_final = None\n else:\n edge_attr_final = self.edge_decoder(edge_attr_final)\n return xh_final, edge_attr_final\n\n @staticmethod\n def enpose_pbc(xh: List[Tensor], magnitude=10.0) -> List[Tensor]:\n xrange = magnitude * 2\n xh = [torch.remainder(_xh + magnitude, xrange) - magnitude for _xh in xh]\n return xh\n\n @staticmethod\n def compute_frag_index(n_frag_switch: Tensor) -> np.ndarray:\n counts = [\n torch.where(n_frag_switch == ii)[0].numel()\n for ii in torch.unique(n_frag_switch)\n ]\n return np.concatenate([np.array([0]), np.cumsum(counts)])\n\n @torch.no_grad()\n def adjust_edge_attr_on_new_eij(\n self,\n edge_index: Tensor,\n edge_attr: Tensor,\n edge_index_new: Tensor,\n ) -> Tensor:\n r\"\"\"Get ready new edge attributes (e_ij) given old {ij, e_ij} and new {ij}\n\n Args:\n edge_index (Tensor): ij\n edge_attr (Tensor): e_ij\n edge_index_new (Tensor): new ij\n\n Raises:\n ValueError: finding multiple entries for the same ij pair\n\n Returns:\n Tensor: new e_ij\n \"\"\"\n edge_index_T = torch.transpose(edge_index, 1, 0)\n edge_index_new_T = torch.transpose(edge_index_new, 1, 0)\n\n edge_attr_new = []\n for _ind, ij in enumerate(edge_index_new_T):\n ind = torch.where((ij == edge_index_T).all(dim=1))[0]\n if ind.size(0) > 1:\n raise ValueError(f\"ind should only be 0 or 1, getting {ind}\")\n\n if 
ind.size(0) == 0:\n self.create_new_edge_attr(\n ind_new=_ind,\n ij_new=ij,\n edge_index_new_T=edge_index_new_T,\n edge_attr_new=edge_attr_new,\n edge_attr=edge_attr,\n )\n else:\n edge_attr_new.append(edge_attr[ind.item()].detach())\n return torch.stack(edge_attr_new, dim=0)\n\n @staticmethod\n def init_edge_attr(sample_edge_attr):\n r\"\"\"initialize edge attributes.\"\"\"\n return torch.rand_like(sample_edge_attr)\n\n def create_new_edge_attr(\n self,\n ind_new: Tensor,\n ij_new: Tensor,\n edge_index_new_T: Tensor,\n edge_attr_new: List[Tensor],\n edge_attr: Tensor,\n ) -> List[Tensor]:\n r\"\"\"Create new edge attrbution for ij that is not present in old connections\n\n Args:\n ind_new (Tensor): natural index of new ij\n ij_new (Tensor): new ij\n edge_index_new_T (Tensor): new edge indexes, [n_edge, 2]\n edge_attr_new (List[Tensor]): list of new edge attributes\n edge_attr (Tensor): old edge attributes\n\n Raises:\n ValueError: not ji found for ij in new indexes\n\n Returns:\n List[Tensor]: list of new edge attributes\n \"\"\"\n ij_new_reverse = ij_new[torch.tensor([1, 0])]\n ind_new_reverse = torch.where((ij_new_reverse == edge_index_new_T).all(dim=1))[\n 0\n ]\n print(ind_new_reverse)\n if ind_new_reverse.size(0) == 0:\n raise ValueError(f\"should always find a reverse ind.\")\n # print(ij_new, ind_new, ind_new_reverse)\n if ind_new_reverse.item() >= ind_new:\n edge_attr_new.append(self.init_edge_attr(edge_attr[0]))\n else:\n edge_attr_new.append(edge_attr_new[ind_new_reverse.item()])\n return edge_attr_new\n\n @staticmethod\n def remove_mean_batch(x, indices):\n mean = scatter_mean(x, indices, dim=0)\n x = x - mean[indices]\n return x"
},
{
"identifier": "Confidence",
"path": "oa_reactdiff/dynamics/confidence.py",
"snippet": "class Confidence(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n **kwargs,\n ) -> None:\n r\"\"\"Confindence score for generated samples.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n model_config.update({\"for_conf\": True})\n update_pocket_coords = True\n condition_time = (True,)\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n hidden_channels = model_config[\"hidden_channels\"]\n self.readout = GatedMLP(\n in_dim=hidden_channels,\n out_dims=[hidden_channels, hidden_channels, 1],\n activation=\"swish\",\n bias=True,\n last_layer_no_activation=True,\n )\n\n def _forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tensor:\n r\"\"\"predict confidence.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. 
Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tensor: binary probability of confidence fo each graph.\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n node_features = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n ) # (n_node, n_hidden)\n\n graph_features = scatter_mean(\n node_features,\n index=combined_mask,\n dim=0,\n ) # (n_system, n_hidden)\n conf = self.readout(graph_features)\n return conf.squeeze()\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n ):\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n xh = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n pred = self._forward(\n xh=xh,\n edge_index=edge_index,\n t=torch.tensor([0]),\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None,\n )\n return pred"
},
{
"identifier": "get_edges_index",
"path": "oa_reactdiff/utils/_graph_tools.py",
"snippet": "def get_edges_index(\n combined_mask: Tensor,\n pos: Optional[Tensor] = None,\n edge_cutoff: Optional[float] = None,\n remove_self_edge: bool = False,\n) -> Tensor:\n r\"\"\"\n\n Args:\n combined_mask (Tensor): Combined mask for all fragments.\n Edges are built for nodes with the same indexes in the mask.\n pos (Optional[Tensor]): 3D coordinations of nodes. Defaults to None.\n edge_cutoff (Optional[float]): cutoff for building edges within a fragment.\n Defaults to None.\n remove_self_edge (bool): whether to remove self-connecting edge (i.e., ii).\n Defaults to False.\n\n Returns:\n Tensor: [2, n_edges], i for node index.\n \"\"\"\n # TODO: cache batches for each example in self._edges_dict[n_nodes]\n adj = combined_mask[:, None] == combined_mask[None, :]\n if edge_cutoff is not None:\n adj = adj & (torch.cdist(pos, pos) <= edge_cutoff)\n if remove_self_edge:\n adj = adj.fill_diagonal_(False)\n edges = torch.stack(torch.where(adj), dim=0)\n return edges"
},
{
"identifier": "get_n_frag_switch",
"path": "oa_reactdiff/utils/_graph_tools.py",
"snippet": "def get_n_frag_switch(natm_list: List[Tensor]) -> Tensor:\n r\"\"\"Get the type of fragments to which each node belongs\n Example: [Tensor(1, 1), Tensor(2, 1)] -> [0, 0, 1, 1 ,1]\n\n Args:\n natm_list (List[Tensor]): [Tensor([number of atoms per small fragment])]\n\n Returns:\n Tensor: [n_nodes], type of fragment each node belongs to\n \"\"\"\n shapes = [natm.shape[0] for natm in natm_list]\n assert np.std(shapes) == 0, \"Tensor must be the same length for <natom_list>\"\n n_frag_switch = torch.repeat_interleave(\n torch.arange(len(natm_list), device=natm_list[0].device),\n torch.tensor(\n [torch.sum(natm).item() for natm in natm_list],\n device=natm_list[0].device,\n ),\n )\n return n_frag_switch.to(natm_list[0].device)"
},
{
"identifier": "get_mask_for_frag",
"path": "oa_reactdiff/utils/_graph_tools.py",
"snippet": "def get_mask_for_frag(natm: Tensor) -> Tensor:\n r\"\"\"Get fragment index for each node\n Example: Tensor([2, 0, 3]) -> [0, 0, 2, 2, 2]\n\n Args:\n natm (Tensor): number of nodes per small fragment\n\n Returns:\n Tensor: [n_node], the natural index of fragment a node belongs to\n \"\"\"\n return torch.repeat_interleave(\n torch.arange(natm.size(0), device=natm.device), natm\n ).to(natm.device)"
}
] | import unittest
import torch
from typing import List, Optional
from torch import Tensor, tensor, nn
from pytorch_lightning import seed_everything
from oa_reactdiff.model import LEFTNet
from oa_reactdiff.dynamics import EGNNDynamics, Confidence
from oa_reactdiff.utils import (
get_n_frag_switch,
get_mask_for_frag,
get_edges_index,
) | 8,404 | """Test model forward pass and equivariance."""
seed_everything(0, workers=True)
def init_weights(m):
r"""Weight initialization for all MLP layers.
Args:
m: a nn.Module
"""
if isinstance(m, nn.Linear):
gain = 0.5
nn.init.xavier_uniform_(m.weight, gain=gain)
if m.bias is not None:
nn.init.uniform_(m.bias, -gain, gain)
egnn_config = dict(
in_node_nf=8,
in_edge_nf=2,
hidden_nf=2,
edge_hidden_nf=3,
act_fn="swish",
n_layers=6,
attention=True,
out_node_nf=None,
tanh=False,
coords_range=15.0,
norm_constant=1.0,
inv_sublayers=2,
sin_embedding=False,
normalization_factor=100.0,
aggregation_method="sum",
)
leftnet_config = dict(
pos_require_grad=False,
cutoff=5.0,
num_layers=2,
hidden_channels=32,
num_radial=8,
in_node_nf=8,
)
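# Note: judging from the LEFTNet signature in the context above, `in_node_nf` is not a
# named parameter (LEFTNet takes `in_hidden_channels`); the extra key is absorbed by
# **kwargs, and the default in_hidden_channels=8 happens to match the value intended here.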
node_nfs: List[int] = [4, 5, 6]
edge_nf: int = 3
condition_nf: int = 3
fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"]
pos_dim: int = 3
update_pocket_coords: bool = True
condition_time: bool = True
edge_cutoff: Optional[float] = None
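# Test setup: three fragments ("inorg_node", "org_edge", "org_node") with per-fragment
# node feature widths node_nfs = [4, 5, 6], 3 edge features, and a 3-dimensional condition vector.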
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.egnn_dynamics = EGNNDynamics(
model_config=egnn_config,
node_nfs=node_nfs,
edge_nf=edge_nf,
condition_nf=condition_nf,
fragment_names=fragment_names,
pos_dim=pos_dim,
update_pocket_coords=update_pocket_coords,
condition_time=condition_time,
edge_cutoff=edge_cutoff,
)
cls.egnn_dynamics.model.apply(init_weights)
cls.leftnet_dynamics = EGNNDynamics(
model_config=leftnet_config,
node_nfs=node_nfs,
edge_nf=edge_nf,
condition_nf=condition_nf,
fragment_names=fragment_names,
pos_dim=pos_dim,
update_pocket_coords=update_pocket_coords,
condition_time=condition_time,
edge_cutoff=edge_cutoff,
model=LEFTNet,
)
cls.dynamics = [cls.egnn_dynamics, cls.leftnet_dynamics]
cls.n_samples = 2
cls.fragments_nodes = [
torch.tensor([2, 0]),
torch.tensor([2, 3]),
torch.tensor([1, 2]),
]
cls.fragments_masks = [
get_mask_for_frag(natm_nodes) for natm_nodes in cls.fragments_nodes
]
cls.conditions = torch.rand(cls.n_samples, condition_nf)
cls.n_frag_switch = get_n_frag_switch(cls.fragments_nodes)
cls.combined_mask = torch.cat(cls.fragments_masks)
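# Worked example (values follow from get_mask_for_frag / get_n_frag_switch shown in the context):
# fragments_masks -> [0, 0], [0, 0, 1, 1, 1], [0, 1, 1] (sample index per node, per fragment)
# n_frag_switch -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] (fragment type of each node)
# combined_mask -> [0, 0, 0, 0, 1, 1, 1, 0, 1, 1] (sample index of each node, concatenated)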
| """Test model forward pass and equivariance."""
seed_everything(0, workers=True)
def init_weights(m):
r"""Weight initialization for all MLP layers.
Args:
m: a nn.Module
"""
if isinstance(m, nn.Linear):
gain = 0.5
nn.init.xavier_uniform_(m.weight, gain=gain)
if m.bias is not None:
nn.init.uniform_(m.bias, -gain, gain)
egnn_config = dict(
in_node_nf=8,
in_edge_nf=2,
hidden_nf=2,
edge_hidden_nf=3,
act_fn="swish",
n_layers=6,
attention=True,
out_node_nf=None,
tanh=False,
coords_range=15.0,
norm_constant=1.0,
inv_sublayers=2,
sin_embedding=False,
normalization_factor=100.0,
aggregation_method="sum",
)
leftnet_config = dict(
pos_require_grad=False,
cutoff=5.0,
num_layers=2,
hidden_channels=32,
num_radial=8,
in_node_nf=8,
)
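# Note: judging from the LEFTNet signature in the context above, `in_node_nf` is not a
# named parameter (LEFTNet takes `in_hidden_channels`); the extra key is absorbed by
# **kwargs, and the default in_hidden_channels=8 happens to match the value intended here.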
node_nfs: List[int] = [4, 5, 6]
edge_nf: int = 3
condition_nf: int = 3
fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"]
pos_dim: int = 3
update_pocket_coords: bool = True
condition_time: bool = True
edge_cutoff: Optional[float] = None
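# Test setup: three fragments ("inorg_node", "org_edge", "org_node") with per-fragment
# node feature widths node_nfs = [4, 5, 6], 3 edge features, and a 3-dimensional condition vector.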
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.egnn_dynamics = EGNNDynamics(
model_config=egnn_config,
node_nfs=node_nfs,
edge_nf=edge_nf,
condition_nf=condition_nf,
fragment_names=fragment_names,
pos_dim=pos_dim,
update_pocket_coords=update_pocket_coords,
condition_time=condition_time,
edge_cutoff=edge_cutoff,
)
cls.egnn_dynamics.model.apply(init_weights)
cls.leftnet_dynamics = EGNNDynamics(
model_config=leftnet_config,
node_nfs=node_nfs,
edge_nf=edge_nf,
condition_nf=condition_nf,
fragment_names=fragment_names,
pos_dim=pos_dim,
update_pocket_coords=update_pocket_coords,
condition_time=condition_time,
edge_cutoff=edge_cutoff,
model=LEFTNet,
)
cls.dynamics = [cls.egnn_dynamics, cls.leftnet_dynamics]
cls.n_samples = 2
cls.fragments_nodes = [
torch.tensor([2, 0]),
torch.tensor([2, 3]),
torch.tensor([1, 2]),
]
cls.fragments_masks = [
get_mask_for_frag(natm_nodes) for natm_nodes in cls.fragments_nodes
]
cls.conditions = torch.rand(cls.n_samples, condition_nf)
cls.n_frag_switch = get_n_frag_switch(cls.fragments_nodes)
cls.combined_mask = torch.cat(cls.fragments_masks) | cls.edge_index = get_edges_index(cls.combined_mask, remove_self_edge=True) | 3 | 2023-10-30 02:53:38+00:00 | 12k |
Weitheskmt/WeiDMD | build/lib/weidmd/cdmd.py | [
{
"identifier": "DMDBase",
"path": "build/lib/weidmd/dmdbase.py",
"snippet": "class DMDBase:\n \"\"\"\n Dynamic Mode Decomposition base class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: If True, amplitudes are computed like in optimized DMD (see\n :func:`~dmdbase.DMDBase._compute_amplitudes` for reference). If\n False, amplitudes are computed following the standard algorithm. If\n `opt` is an integer, it is used as the (temporal) index of the snapshot\n used to compute DMD modes amplitudes (following the standard\n algorithm).\n The reconstruction will generally be better in time instants near the\n chosen snapshot; however increasing `opt` may lead to wrong results\n when the system presents small eigenvalues. For this reason a manual\n selection of the number of eigenvalues considered for the analyisis may\n be needed (check `svd_rank`). Also setting `svd_rank` to a value\n between 0 and 1 may give better results. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n\n :cvar dict original_time: dictionary that contains information about the\n time window where the system is sampled:\n\n - `t0` is the time of the first input snapshot;\n - `tend` is the time of the last input snapshot;\n - `dt` is the delta time between the snapshots.\n\n :cvar dict dmd_time: dictionary that contains information about the time\n window where the system is reconstructed:\n\n - `t0` is the time of the first approximated solution;\n - `tend` is the time of the last approximated solution;\n - `dt` is the delta time between the approximated solutions.\n\n \"\"\"\n\n def __init__(\n self,\n svd_rank=0,\n tlsq_rank=0,\n exact=False,\n opt=False,\n rescale_mode=None,\n forward_backward=False,\n sorted_eigs=False,\n tikhonov_regularization=None,\n ):\n self._Atilde = DMDOperator(\n svd_rank=svd_rank,\n exact=exact,\n rescale_mode=rescale_mode,\n forward_backward=forward_backward,\n sorted_eigs=sorted_eigs,\n tikhonov_regularization=tikhonov_regularization,\n )\n\n self._tlsq_rank = tlsq_rank\n self._original_time = None\n self._dmd_time = None\n self._opt = opt\n self._exact = exact\n\n self._b = None # amplitudes\n self._snapshots_holder = None\n\n self._modes_activation_bitmask_proxy = None\n\n @property\n def dmd_timesteps(self):\n \"\"\"\n Get the timesteps of the reconstructed states.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.dmd_time[\"t0\"],\n self.dmd_time[\"tend\"] + self.dmd_time[\"dt\"],\n self.dmd_time[\"dt\"],\n )\n\n @property\n def original_timesteps(self):\n \"\"\"\n Get the timesteps of the original snapshot.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.original_time[\"t0\"],\n self.original_time[\"tend\"] + self.original_time[\"dt\"],\n self.original_time[\"dt\"],\n )\n\n @property\n def modes(self):\n \"\"\"\n Get the matrix containing the DMD modes, stored by column.\n\n :return: the matrix containing the DMD modes.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.modes\n return self._modes_activation_bitmask_proxy.modes\n\n @property\n def operator(self):\n \"\"\"\n Get the instance of DMDOperator.\n\n :return: the instance of DMDOperator\n :rtype: DMDOperator\n \"\"\"\n return self._Atilde\n\n @property\n def eigs(self):\n \"\"\"\n Get the eigenvalues of A tilde.\n\n :return: the eigenvalues from the eigendecomposition of `atilde`.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.eigenvalues\n return self._modes_activation_bitmask_proxy.eigs\n\n @property\n def dynamics(self):\n \"\"\"\n Get the time evolution of each mode.\n\n .. 
math::\n\n \\\\mathbf{x}(t) \\\\approx\n \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\exp \\\\left( \\\\omega_{k} t\n \\\\right) b_{k} = \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\left(\n \\\\lambda_{k} \\\\right)^{\\\\left( t / \\\\Delta t \\\\right)} b_{k}\n\n :return: the matrix that contains all the time evolution, stored by\n row.\n :rtype: numpy.ndarray\n \"\"\"\n temp = np.repeat(\n self.eigs[:, None], self.dmd_timesteps.shape[0], axis=1\n )\n tpow = (\n self.dmd_timesteps - self.original_time[\"t0\"]\n ) // self.original_time[\"dt\"]\n\n # The new formula is x_(k+j) = \\Phi \\Lambda^k \\Phi^(-1) x_j.\n # Since j is fixed, for a given snapshot \"u\" we have the following\n # formula:\n # x_u = \\Phi \\Lambda^{u-j} \\Phi^(-1) x_j\n # Therefore tpow must be scaled appropriately.\n tpow = self._translate_eigs_exponent(tpow)\n\n return np.power(temp, tpow) * self.amplitudes[:, None]\n\n def _translate_eigs_exponent(self, tpow):\n \"\"\"\n Transforms the exponent of the eigenvalues in the dynamics formula\n according to the selected value of `self._opt` (check the documentation\n for `opt` in :func:`__init__ <dmdbase.DMDBase.__init__>`).\n\n :param tpow: the exponent(s) of Sigma in the original DMD formula.\n :type tpow: int or np.ndarray\n :return: the exponent(s) adjusted according to `self._opt`\n :rtype: int or np.ndarray\n \"\"\"\n\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n if amplitudes_snapshot_index < 0:\n # we take care of negative indexes: -n becomes T - n\n return tpow - (self.snapshots.shape[1] + amplitudes_snapshot_index)\n else:\n return tpow - amplitudes_snapshot_index\n\n @property\n def reconstructed_data(self):\n \"\"\"\n Get the reconstructed data.\n\n :return: the matrix that contains the reconstructed snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return self.modes.dot(self.dynamics)\n\n @property\n def snapshots(self):\n \"\"\"\n Get the input data (space flattened).\n\n :return: the matrix that contains the flattened snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots\n return None\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Get the original input snapshot shape.\n\n :return: input snapshots shape.\n :rtype: tuple\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots_shape\n return None\n\n @property\n def frequency(self):\n \"\"\"\n Get the amplitude spectrum.\n\n :return: the array that contains the frequencies of the eigenvalues.\n :rtype: numpy.ndarray\n \"\"\"\n return np.log(self.eigs).imag / (2 * np.pi * self.original_time[\"dt\"])\n\n @property\n def growth_rate(self): # To check\n \"\"\"\n Get the growth rate values relative to the modes.\n\n :return: the Floquet values\n :rtype: numpy.ndarray\n \"\"\"\n return self.eigs.real / self.original_time[\"dt\"]\n\n @property\n def amplitudes(self):\n \"\"\"\n Get the coefficients that minimize the error between the original\n system and the reconstructed one. 
For futher information, see\n `dmdbase._compute_amplitudes`.\n\n :return: the array that contains the amplitudes coefficient.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n return self._modes_activation_bitmask_proxy.amplitudes\n\n @property\n def fitted(self):\n \"\"\"Check whether this DMD instance has been fitted.\n\n :return: `True` is the instance has been fitted, `False` otherwise.\n :rtype: bool\n \"\"\"\n try:\n return self.operator.modes is not None\n except (ValueError, AttributeError):\n return False\n\n @property\n def modes_activation_bitmask(self):\n \"\"\"\n Get the bitmask which controls which DMD modes are enabled at the\n moment in this DMD instance.\n\n The DMD instance must be fitted before this property becomes valid.\n After :func:`fit` is called, the defalt value of\n `modes_activation_bitmask` is an array of `True` values of the same\n shape of :func:`amplitudes`.\n\n The array returned is read-only (this allow us to react appropriately\n to changes in the bitmask). In order to modify the bitmask you need to\n set the field to a brand-new value (see example below).\n\n Example:\n\n .. code-block:: python\n\n >>> # this is an error\n >>> dmd.modes_activation_bitmask[[1,2]] = False\n ValueError: assignment destination is read-only\n >>> tmp = np.array(dmd.modes_activation_bitmask)\n >>> tmp[[1,2]] = False\n >>> dmd.modes_activation_bitmask = tmp\n\n :return: The DMD modes activation bitmask.\n :rtype: numpy.ndarray\n \"\"\"\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n\n bitmask = self._modes_activation_bitmask_proxy.old_bitmask\n # make sure that the array is immutable\n bitmask.flags.writeable = False\n return bitmask\n\n @modes_activation_bitmask.setter\n def modes_activation_bitmask(self, value):\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n value = np.array(value)\n if value.dtype != bool:\n raise RuntimeError(\n \"Unxpected dtype, expected bool, got {}.\".format(value.dtype)\n )\n\n # check that the shape is correct\n if value.shape != self.modes_activation_bitmask.shape:\n raise ValueError(\n \"Expected shape {}, got {}\".format(\n self.modes_activation_bitmask.shape, value.shape\n )\n )\n\n self._modes_activation_bitmask_proxy.change_bitmask(value)\n\n def _allocate_modes_bitmask_proxy(self):\n \"\"\"\n Utility method which allocates the activation bitmask proxy using the\n quantities that are currently available in this DMD instance. Fails\n quietly if the amplitudes are not set.\n \"\"\"\n if hasattr(self, \"_b\") and self._b is not None:\n self._modes_activation_bitmask_proxy = ActivationBitmaskProxy(\n self.operator, self._b\n )\n\n def __getitem__(self, key):\n \"\"\"\n Restrict the DMD modes used by this instance to a subset of indexes\n specified by keys. The value returned is a shallow copy of this DMD\n instance, with a different value in :func:`modes_activation_bitmask`.\n Therefore assignments to attributes are not reflected into the original\n instance.\n\n However the DMD instance returned should not be used for low-level\n manipulations on DMD modes, since the underlying DMD operator is shared\n with the original instance. 
For this reasons modifications to NumPy\n arrays may result in unwanted and unspecified situations which should\n be avoided in principle.\n\n :param key: An index (integer), slice or list of indexes.\n :type key: int or slice or list or np.ndarray\n :return: A shallow copy of this DMD instance having only a subset of\n DMD modes which are those indexed by `key`.\n :rtype: DMDBase\n \"\"\"\n\n if isinstance(key, (slice, int, list, np.ndarray)):\n filter_function = lambda x: isinstance(x, int)\n\n if isinstance(key, (list, np.ndarray)):\n if not all(map(filter_function, key)):\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or \"\n \"a list of indexes.\"\n )\n # no repeated elements\n if len(key) != len(set(key)):\n raise ValueError(\"Repeated indexes are not supported.\")\n else:\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or a list \"\n \"of indexes, got {}\".format(type(key))\n )\n\n mask = np.full(self.modes_activation_bitmask.shape, False)\n mask[key] = True\n\n shallow_copy = copy(self)\n shallow_copy._allocate_modes_bitmask_proxy()\n shallow_copy.modes_activation_bitmask = mask\n\n return shallow_copy\n\n @property\n def original_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n fit this DMD instance.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first input snapshot (0 by default).\n `tend` Time of the last input snapshot (usually corresponds to the number of snapshots).\n `dt` Timestep between two snapshots (1 by default).\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._original_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._original_time\n\n @property\n def dmd_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n reconstruct/predict using this DMD instance. By default this is equal\n to :func:`original_time`.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first output snapshot.\n `tend` Time of the last output snapshot.\n `dt` Timestep between two snapshots.\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._dmd_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._dmd_time\n\n @dmd_time.setter\n def dmd_time(self, value):\n self._dmd_time = deepcopy(value)\n\n def _set_initial_time_dictionary(self, time_dict):\n \"\"\"\n Set the initial values for the class fields `time_dict` and\n `original_time`. 
This is usually called in `fit()` and never again.\n\n :param time_dict: Initial time dictionary for this DMD instance.\n :type time_dict: dict\n \"\"\"\n if not (\n \"t0\" in time_dict and \"tend\" in time_dict and \"dt\" in time_dict\n ):\n raise ValueError(\n 'time_dict must contain the keys \"t0\", \"tend\" and \"dt\".'\n )\n if len(time_dict) > 3:\n raise ValueError(\n 'time_dict must contain only the keys \"t0\", \"tend\" and \"dt\".'\n )\n\n self._original_time = DMDTimeDict(dict(time_dict))\n self._dmd_time = DMDTimeDict(dict(time_dict))\n\n def fit(self, X):\n \"\"\"\n Abstract method to fit the snapshots matrices.\n\n Not implemented, it has to be implemented in subclasses.\n \"\"\"\n name = self.__class__.__name__\n msg = f\"Subclass must implement abstract method {name}.fit\"\n raise NotImplementedError(msg)\n\n def _reset(self):\n \"\"\"\n Reset this instance. Should be called in :func:`fit`.\n \"\"\"\n self._modes_activation_bitmask_proxy = None\n self._b = None\n self._snapshots_holder = None\n\n def save(self, fname):\n \"\"\"\n Save the object to `fname` using the pickle module.\n\n :param str fname: the name of file where the reduced order model will\n be saved.\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD(...) # Construct here the rom\n >>> dmd.fit(...)\n >>> dmd.save('pydmd.dmd')\n \"\"\"\n with open(fname, \"wb\") as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load(fname):\n \"\"\"\n Load the object from `fname` using the pickle module.\n\n :return: The `ReducedOrderModel` loaded\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD.load('pydmd.dmd')\n >>> print(dmd.reconstructed_data)\n \"\"\"\n with open(fname, \"rb\") as output:\n return pickle.load(output)\n\n def _optimal_dmd_matrices(self):\n # compute the vandermonde matrix\n vander = np.vander(self.eigs, len(self.dmd_timesteps), True)\n\n P = np.multiply(\n np.dot(self.modes.conj().T, self.modes),\n np.conj(np.dot(vander, vander.conj().T)),\n )\n\n if self._exact:\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [vander, self.snapshots.conj().T, self.modes]\n )\n )\n )\n else:\n _, s, V = compute_svd(self.snapshots[:, :-1], self.modes.shape[-1])\n\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [\n vander[:, :-1],\n V,\n np.diag(s).conj(),\n self.operator.eigenvectors,\n ]\n )\n )\n )\n\n return P, q\n\n def _compute_amplitudes(self):\n \"\"\"\n Compute the amplitude coefficients. If `self._opt` is False the\n amplitudes are computed by minimizing the error between the modes and\n the first snapshot; if `self._opt` is True the amplitudes are computed\n by minimizing the error between the modes and all the snapshots, at the\n expense of bigger computational cost.\n\n This method uses the class variables self.snapshots (for the\n snapshots), self.modes and self.eigs.\n\n :return: the amplitudes array\n :rtype: numpy.ndarray\n\n References for optimal amplitudes:\n Jovanovic et al. 2014, Sparsity-promoting dynamic mode decomposition,\n https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document\n \"\"\"\n if isinstance(self._opt, bool) and self._opt:\n # b optimal\n a = np.linalg.solve(*self._optimal_dmd_matrices())\n else:\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n a = np.linalg.lstsq(\n self.modes,\n self.snapshots.T[amplitudes_snapshot_index],\n rcond=None,\n )[0]\n\n return a"
},
{
"identifier": "DMDOperator",
"path": "build/lib/weidmd/dmdoperator.py",
"snippet": "class DMDOperator:\n \"\"\"\n Dynamic Mode Decomposition standard operator class. Non-standard ways of\n computing the low-rank Atilde operator should be coded into subclasses of\n this class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n \"\"\"\n\n def __init__(\n self,\n svd_rank,\n exact,\n forward_backward,\n rescale_mode,\n sorted_eigs,\n tikhonov_regularization,\n ):\n self._exact = exact\n self._rescale_mode = rescale_mode\n self._svd_rank = svd_rank\n self._forward_backward = forward_backward\n self._sorted_eigs = sorted_eigs\n self._tikhonov_regularization = tikhonov_regularization\n self._norm_X = None\n\n def compute_operator(self, X, Y):\n \"\"\"\n Compute the low-rank operator.\n\n :param numpy.ndarray X: matrix containing the snapshots x0,..x{n-1} by\n column.\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :return: the (truncated) left-singular vectors matrix, the (truncated)\n singular values array, the (truncated) right-singular vectors\n matrix of X.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n \"\"\"\n\n U, s, V = compute_svd(X, self._svd_rank)\n\n if self._tikhonov_regularization is not None:\n self._norm_X = np.linalg.norm(X)\n atilde = self._least_square_operator(U, s, V, Y)\n\n if self._forward_backward:\n # b stands for \"backward\"\n bU, bs, bV = compute_svd(Y, svd_rank=len(s))\n atilde_back = self._least_square_operator(bU, bs, bV, X)\n atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back)))\n if hasattr(np, \"complex256\") and atilde.dtype == np.complex256:\n atilde = atilde.astype(np.complex128)\n msg = \"Casting atilde from np.complex256 to np.complex128\"\n logging.info(msg)\n\n if self._rescale_mode == \"auto\":\n self._rescale_mode = s\n\n self._Atilde = atilde\n self._compute_eigenquantities()\n self._compute_modes(Y, U, s, V)\n\n return U, s, V\n\n @property\n def shape(self):\n \"\"\"Shape of the operator\"\"\"\n return self.as_numpy_array.shape\n\n def __call__(self, snapshot_lowrank_modal_coefficients):\n \"\"\"\n Apply the low-rank operator to a vector of the modal coefficients of a\n 
snapshot(s).\n\n :param numpy.ndarray snapshot_lowrank_modal_coefficients: low-rank\n representation (in modal coefficients) of a snapshot x{n}.\n :return: low-rank representation (in modal coefficients) of x{n+1}.\n :rtype: numpy.ndarray\n \"\"\"\n\n return self._Atilde.dot(snapshot_lowrank_modal_coefficients)\n\n @property\n def eigenvalues(self):\n if not hasattr(self, \"_eigenvalues\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvalues\n\n @property\n def eigenvectors(self):\n if not hasattr(self, \"_eigenvectors\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvectors\n\n @property\n def modes(self):\n if not hasattr(self, \"_modes\"):\n raise ValueError(\"You need to call fit before\")\n return self._modes\n\n @property\n def Lambda(self):\n if not hasattr(self, \"_Lambda\"):\n raise ValueError(\"You need to call fit before\")\n return self._Lambda\n\n @property\n def as_numpy_array(self):\n if not hasattr(self, \"_Atilde\") or self._Atilde is None:\n raise ValueError(\"You need to call fit before\")\n else:\n return self._Atilde\n\n def _least_square_operator(self, U, s, V, Y):\n \"\"\"\n Private method that computes the lowrank operator from the singular\n value decomposition of matrix X and the matrix Y.\n\n .. math::\n\n \\\\mathbf{\\\\tilde{A}} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{X}^\\\\dagger \\\\mathbf{U} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{V} \\\\mathbf{S}^{-1}\n\n :param numpy.ndarray U: 2D matrix that contains the left-singular\n vectors of X, stored by column.\n :param numpy.ndarray s: 1D array that contains the singular values of\n X.\n :param numpy.ndarray V: 2D matrix that contains the right-singular\n vectors of X, stored by row.\n :param numpy.ndarray Y: input matrix Y.\n :return: the lowrank operator\n :rtype: numpy.ndarray\n \"\"\"\n if self._tikhonov_regularization is not None:\n s = (\n s**2 + self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(s)\n return np.linalg.multi_dot([U.T.conj(), Y, V]) * np.reciprocal(s)\n\n def _compute_eigenquantities(self):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n low-dimensional operator, scaled according to self._rescale_mode.\n \"\"\"\n\n if self._rescale_mode is None:\n # scaling isn't required\n Ahat = self._Atilde\n elif isinstance(self._rescale_mode, np.ndarray):\n if len(self._rescale_mode) != self.as_numpy_array.shape[0]:\n raise ValueError(\n \"\"\"Scaling by an invalid number of\n coefficients\"\"\"\n )\n scaling_factors_array = self._rescale_mode\n\n factors_inv_sqrt = np.diag(np.power(scaling_factors_array, -0.5))\n factors_sqrt = np.diag(np.power(scaling_factors_array, 0.5))\n\n # if an index is 0, we get inf when taking the reciprocal\n for idx, item in enumerate(scaling_factors_array):\n if item == 0:\n factors_inv_sqrt[idx] = 0\n\n Ahat = np.linalg.multi_dot(\n [factors_inv_sqrt, self.as_numpy_array, factors_sqrt]\n )\n else:\n raise ValueError(\n \"Invalid value for rescale_mode: {} of type {}\".format(\n self._rescale_mode, type(self._rescale_mode)\n )\n )\n\n self._eigenvalues, self._eigenvectors = np.linalg.eig(Ahat)\n\n if self._sorted_eigs is not False and self._sorted_eigs is not None:\n if self._sorted_eigs == \"abs\":\n\n def k(tp):\n return abs(tp[0])\n\n elif self._sorted_eigs == \"real\":\n\n def k(tp):\n eig = tp[0]\n if isinstance(eig, complex):\n return (eig.real, eig.imag)\n return (eig.real, 0)\n\n else:\n raise ValueError(\n \"Invalid value for sorted_eigs: {}\".format(\n 
self._sorted_eigs\n )\n )\n\n # each column is an eigenvector, therefore we take the\n # transpose to associate each row (former column) to an\n # eigenvalue before sorting\n a, b = zip(\n *sorted(zip(self._eigenvalues, self._eigenvectors.T), key=k)\n )\n self._eigenvalues = np.array([eig for eig in a])\n # we restore the original condition (eigenvectors in columns)\n self._eigenvectors = np.array([vec for vec in b]).T\n\n def _compute_modes(self, Y, U, Sigma, V):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n high-dimensional operator (stored in self.modes and self.Lambda).\n\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :param numpy.ndarray U: (truncated) left singular vectors of X\n :param numpy.ndarray Sigma: (truncated) singular values of X\n :param numpy.ndarray V: (truncated) right singular vectors of X\n \"\"\"\n\n if self._rescale_mode is None:\n W = self.eigenvectors\n else:\n # compute W as shown in arXiv:1409.5496 (section 2.4)\n factors_sqrt = np.diag(np.power(self._rescale_mode, 0.5))\n W = factors_sqrt.dot(self.eigenvectors)\n\n # compute the eigenvectors of the high-dimensional operator\n if self._exact:\n if self._tikhonov_regularization is not None:\n Sigma = (\n Sigma**2 + self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(Sigma)\n high_dimensional_eigenvectors = (\n Y.dot(V) * np.reciprocal(Sigma)\n ).dot(W)\n else:\n high_dimensional_eigenvectors = U.dot(W)\n\n # eigenvalues are the same of lowrank\n high_dimensional_eigenvalues = self.eigenvalues\n\n self._modes = high_dimensional_eigenvectors\n self._Lambda = high_dimensional_eigenvalues"
},
{
"identifier": "Snapshots",
"path": "build/lib/weidmd/snapshots.py",
"snippet": "class Snapshots:\n \"\"\"\n Utility class to preprocess snapshots shape for DMD.\n\n This class expects the time to be the last dimensions of the array.\n If a Python list is passed to the constructor, each element in the\n list is assumed to be a snapshot in time.\n\n Space dimensions are flattened (C-order) such that the\n matrix becomes 2D (time changes along the last axis).\n\n :param numpy.array | list(numpy.array) X: Training snapshots.\n \"\"\"\n\n def __init__(self, X):\n (\n self._snapshots,\n self._snapshots_shape,\n ) = Snapshots._unroll_space_dimensions(X)\n\n if self._snapshots.shape[-1] == 1:\n raise ValueError(\"Received only one time snapshot.\")\n\n Snapshots._check_condition_number(self._snapshots)\n\n logging.info(\n \"Snapshots: %s, snapshot shape: %s\",\n self._snapshots.shape,\n self._snapshots_shape,\n )\n\n @staticmethod\n def _unroll_space_dimensions(X):\n if hasattr(X, \"ndim\"):\n if X.ndim == 1:\n raise ValueError(\n \"Expected at least a 2D matrix (space x time).\"\n )\n snapshots = X.reshape((-1, X.shape[-1]))\n shapes = set((X.shape[:-1],))\n else:\n shapes, arrays = zip(\n *[(xarr.shape, xarr.flatten()) for xarr in map(np.asarray, X)]\n )\n\n shapes = set(shapes)\n if len(shapes) != 1:\n raise ValueError(\n f\"Snapshots must have the same size, found {len(shapes)}.\"\n )\n if len(next(iter(shapes))) == 0:\n raise ValueError(\"Expected at least a 2D matrix\")\n\n # move the time to the last axis\n snapshots = np.moveaxis(np.stack(arrays), 0, -1)\n\n return snapshots, shapes.pop()\n\n @staticmethod\n def _check_condition_number(X):\n cond_number = np.linalg.cond(X)\n if cond_number > 10e4:\n warnings.warn(\n f\"Input data condition number {cond_number}. \"\n \"\"\"Consider preprocessing data, passing in augmented data\nmatrix, or regularization methods.\"\"\"\n )\n\n @property\n def snapshots(self):\n \"\"\"\n Snapshots of the system (space flattened).\n \"\"\"\n return self._snapshots\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Original (i.e. non-flattened) snapshot shape (time is ignored).\n \"\"\"\n return self._snapshots_shape"
},
{
"identifier": "compute_svd",
"path": "build/lib/weidmd/utils.py",
"snippet": "def compute_svd(X, svd_rank=0):\n \"\"\"\n Truncated Singular Value Decomposition.\n\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the truncated left-singular vectors matrix, the truncated\n singular values array, the truncated right-singular vectors matrix.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n\n References:\n Gavish, Matan, and David L. Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, V = np.linalg.svd(X, full_matrices=False)\n V = V.conj().T\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n if rank == 0:\n warnings.warn(\n \"SVD optimal rank is 0. The largest singular values are \"\n \"indistinguishable from noise. Setting rank truncation to 1.\",\n RuntimeWarning,\n )\n rank = 1\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = X.shape[1]\n\n U = U[:, :rank]\n V = V[:, :rank]\n s = s[:rank]\n\n return U, s, V"
},
{
"identifier": "compute_tlsq",
"path": "build/lib/weidmd/utils.py",
"snippet": "def compute_tlsq(X, Y, tlsq_rank):\n \"\"\"\n Compute Total Least Square.\n\n :param numpy.ndarray X: the first matrix;\n :param numpy.ndarray Y: the second matrix;\n :param int tlsq_rank: the rank for the truncation; If 0, the method\n does not compute any noise reduction; if positive number, the\n method uses the argument for the SVD truncation used in the TLSQ\n method.\n :return: the denoised matrix X, the denoised matrix Y\n :rtype: numpy.ndarray, numpy.ndarray\n\n References:\n https://arxiv.org/pdf/1703.11004.pdf\n https://arxiv.org/pdf/1502.03854.pdf\n \"\"\"\n # Do not perform tlsq\n if tlsq_rank == 0:\n return X, Y\n\n V = np.linalg.svd(np.append(X, Y, axis=0), full_matrices=False)[-1]\n rank = min(tlsq_rank, V.shape[0])\n VV = V[:rank, :].conj().T.dot(V[:rank, :])\n\n return X.dot(VV), Y.dot(VV)"
}
] | import numpy as np
import scipy.sparse
from scipy.linalg import sqrtm
from .dmdbase import DMDBase
from .dmdoperator import DMDOperator
from .snapshots import Snapshots
from .utils import compute_svd, compute_tlsq | 10,695 |
from __future__ import division
class CDMDOperator(DMDOperator):
"""
DMD operator for Compressed-DMD.
:param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive integer, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param rescale_mode: Scale Atilde as shown in
10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its
eigendecomposition. None means no rescaling, 'auto' means automatic
rescaling using singular values, otherwise the scaling factors.
:type rescale_mode: {'auto'} or None or numpy.ndarray
:param bool forward_backward: If True, the low-rank operator is computed
like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is
False.
:param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by
magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary
part to break ties) if `sorted_eigs='real'`. Default: False.
:type sorted_eigs: {'real', 'abs'} or False
:param tikhonov_regularization: Tikhonov parameter for the regularization.
If `None`, no regularization is applied, if `float`, it is used as the
:math:`\lambda` tikhonov parameter.
:type tikhonov_regularization: int or float
"""
def __init__(
self,
svd_rank,
rescale_mode,
forward_backward,
sorted_eigs,
tikhonov_regularization,
):
super().__init__(
svd_rank=svd_rank,
exact=True,
rescale_mode=rescale_mode,
forward_backward=forward_backward,
sorted_eigs=sorted_eigs,
tikhonov_regularization=tikhonov_regularization,
)
self._Atilde = None
def compute_operator(self, compressedX, compressedY, nonCompressedY):
"""
Compute the low-rank operator.
:param numpy.ndarray compressedX: the compressed version of the matrix
containing the snapshots x0,..x{n-1} by column.
:param numpy.ndarray compressedY: the compressed version of the matrix
containing the snapshots x1,..x{n} by column.
:param numpy.ndarray nonCompressedY: the matrix containing the
snapshots x1,..x{n} by column.
:return: the (truncated) left-singular vectors matrix, the (truncated)
singular values array, the (truncated) right-singular vectors
matrix of compressedX.
:rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray
"""
|
from __future__ import division
class CDMDOperator(DMDOperator):
"""
DMD operator for Compressed-DMD.
:param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive integer, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param rescale_mode: Scale Atilde as shown in
10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its
eigendecomposition. None means no rescaling, 'auto' means automatic
rescaling using singular values, otherwise the scaling factors.
:type rescale_mode: {'auto'} or None or numpy.ndarray
:param bool forward_backward: If True, the low-rank operator is computed
like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is
False.
:param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by
magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary
part to break ties) if `sorted_eigs='real'`. Default: False.
:type sorted_eigs: {'real', 'abs'} or False
:param tikhonov_regularization: Tikhonov parameter for the regularization.
If `None`, no regularization is applied, if `float`, it is used as the
:math:`\lambda` tikhonov parameter.
:type tikhonov_regularization: int or float
"""
def __init__(
self,
svd_rank,
rescale_mode,
forward_backward,
sorted_eigs,
tikhonov_regularization,
):
super().__init__(
svd_rank=svd_rank,
exact=True,
rescale_mode=rescale_mode,
forward_backward=forward_backward,
sorted_eigs=sorted_eigs,
tikhonov_regularization=tikhonov_regularization,
)
self._Atilde = None
def compute_operator(self, compressedX, compressedY, nonCompressedY):
"""
Compute the low-rank operator.
:param numpy.ndarray compressedX: the compressed version of the matrix
containing the snapshots x0,..x{n-1} by column.
:param numpy.ndarray compressedY: the compressed version of the matrix
containing the snapshots x1,..x{n} by column.
:param numpy.ndarray nonCompressedY: the matrix containing the
snapshots x1,..x{n} by column.
:return: the (truncated) left-singular vectors matrix, the (truncated)
singular values array, the (truncated) right-singular vectors
matrix of compressedX.
:rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray
"""
| U, s, V = compute_svd(compressedX, svd_rank=self._svd_rank) | 3 | 2023-10-30 12:37:40+00:00 | 12k |
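Editorial note on the record above: its `next_line` target is the opening statement of `CDMDOperator.compute_operator`. A minimal sketch of how the body plausibly continues, inferred from `DMDOperator.compute_operator` in this record's own context snippet; only the first line is confirmed by the record, so treat the rest as an assumption rather than the repository's verbatim code:

    U, s, V = compute_svd(compressedX, svd_rank=self._svd_rank)

    if self._tikhonov_regularization is not None:
        self._norm_X = np.linalg.norm(compressedX)
    atilde = self._least_square_operator(U, s, V, compressedY)

    if self._forward_backward:
        # "b" stands for "backward", as in the base operator
        bU, bs, bV = compute_svd(compressedY, svd_rank=len(s))
        atilde_back = self._least_square_operator(bU, bs, bV, compressedX)
        atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back)))

    if self._rescale_mode == "auto":
        self._rescale_mode = s

    self._Atilde = atilde
    self._compute_eigenquantities()
    # exact (high-dimensional) modes would come from the non-compressed snapshots
    self._compute_modes(nonCompressedY, U, s, V)

    return U, s, V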
nv-tlabs/trace | tbsim/algos/algos.py | [
{
"identifier": "batch_utils",
"path": "tbsim/utils/batch_utils.py",
"snippet": "def batch_utils():\n return trajdataBatchUtils()"
},
{
"identifier": "Action",
"path": "tbsim/policies/common.py",
"snippet": "class Action(Trajectory):\n pass"
},
{
"identifier": "DiffuserModel",
"path": "tbsim/models/trace.py",
"snippet": "class DiffuserModel(nn.Module):\n '''\n TRACE model.\n '''\n def __init__(\n self,\n map_encoder_model_arch: str,\n input_image_shape,\n map_feature_dim: int,\n map_grid_feature_dim: int,\n diffuser_model_arch: str,\n horizon: int,\n observation_dim: int, \n action_dim: int,\n output_dim: int,\n cond_feature_dim = 256,\n rasterized_map = True,\n use_map_feat_global = False,\n use_map_feat_grid = True,\n hist_num_frames = 31,\n hist_feature_dim = 128,\n n_timesteps=1000,\n loss_type='l2', \n action_weight=1.0, \n loss_discount=1.0, \n dim_mults=(1, 2, 4, 8),\n dynamics_type=None,\n dynamics_kwargs={},\n base_dim=32,\n diffuser_input_mode='state_and_action',\n use_conditioning=True,\n cond_fill_value=-1.0,\n # norm info is ([add_coeffs, div_coeffs])\n diffuser_norm_info=([-17.5, 0, 0, 0, 0, 0],[22.5, 10, 40, 3.14, 500, 31.4]),\n # if using non-rasterized histories, need these\n agent_hist_norm_info=([0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]),\n neighbor_hist_norm_info=([0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]),\n dt=0.1,\n ) -> None:\n\n super().__init__()\n\n # this applies to map and past NEIGHBOR conditioning only\n # curr state or past ego trajecotry are always given\n self.use_conditioning = use_conditioning\n # for test-time classifier-free guidance, if desired\n self.cond_fill_value = cond_fill_value \n\n self.rasterized_map = rasterized_map\n\n cond_in_feat_size = 0\n cond_out_feat_size = cond_feature_dim\n\n # history encoding\n self.agent_hist_encoder = self.neighbor_hist_encoder = None\n # ego history is ALWAYS used as conditioning\n self.agent_hist_encoder = AgentHistoryEncoder(hist_num_frames,\n out_dim=hist_feature_dim,\n use_norm=True,\n norm_info=agent_hist_norm_info)\n cond_in_feat_size += hist_feature_dim\n\n if self.use_conditioning:\n self.neighbor_hist_encoder = NeighborHistoryEncoder(hist_num_frames,\n out_dim=hist_feature_dim,\n use_norm=True,\n norm_info=neighbor_hist_norm_info)\n cond_in_feat_size += hist_feature_dim\n\n # map encoding\n self.map_encoder = None\n self.use_map_feat_global = use_map_feat_global\n self.use_map_feat_grid = use_map_feat_grid\n self.input_image_shape = input_image_shape\n if self.use_conditioning and self.rasterized_map:\n self.map_encoder = MapEncoder(\n model_arch=map_encoder_model_arch,\n input_image_shape=input_image_shape,\n global_feature_dim=map_feature_dim if self.use_map_feat_global else None,\n grid_feature_dim=map_grid_feature_dim if self.use_map_feat_grid else None,\n )\n\n if self.use_map_feat_global:\n cond_in_feat_size += map_feature_dim\n\n # MLP to combine conditioning from all sources\n combine_layer_dims = (cond_in_feat_size, cond_in_feat_size, cond_out_feat_size, cond_out_feat_size)\n self.process_cond_mlp = base_models.MLP(cond_in_feat_size,\n cond_out_feat_size,\n combine_layer_dims,\n normalization=True)\n\n self._dynamics_type = dynamics_type\n self._dynamics_kwargs = dynamics_kwargs\n self._create_dynamics()\n \n # ----- diffuser -----\n self.dt = dt\n # x, y, vel, yaw, acc, yawvel\n assert len(diffuser_norm_info) == 2\n norm_add_coeffs = diffuser_norm_info[0]\n norm_div_coeffs = diffuser_norm_info[1]\n assert len(norm_add_coeffs) == 6\n assert len(norm_div_coeffs) == 6\n self.add_coeffs = np.array(norm_add_coeffs).astype('float32')\n self.div_coeffs = np.array(norm_div_coeffs).astype('float32')\n \n self.diffuser_input_mode = diffuser_input_mode\n\n if diffuser_input_mode == 'state_and_action':\n self.default_chosen_inds = [0, 1, 2, 3, 4, 5]\n else:\n raise\n \n self.horizon = 
horizon\n \n self.observation_dim = observation_dim\n self.action_dim = action_dim\n self.transition_dim = observation_dim + action_dim\n self.output_dim = output_dim\n \n if diffuser_model_arch == \"TemporalMapUnet\":\n transition_in_dim = self.transition_dim\n if self.use_map_feat_grid and self.map_encoder is not None:\n # will be appending map features to each step of trajectory\n transition_in_dim += map_grid_feature_dim\n self.model = TemporalMapUnet(horizon=horizon,\n transition_dim=transition_in_dim,\n cond_dim=cond_out_feat_size,\n output_dim=self.output_dim,\n dim=base_dim,\n dim_mults=dim_mults,\n )\n else:\n print('unknown diffuser_model_arch:', diffuser_model_arch)\n raise\n\n betas = cosine_beta_schedule(n_timesteps)\n alphas = 1. - betas\n alphas_cumprod = torch.cumprod(alphas, axis=0)\n alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])\n\n self.n_timesteps = int(n_timesteps)\n\n self.register_buffer('betas', betas)\n self.register_buffer('alphas_cumprod', alphas_cumprod)\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))\n self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))\n self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)\n self.register_buffer('posterior_variance', posterior_variance)\n\n # calculations for class-free guidance\n self.sqrt_alphas_over_one_minus_alphas_cumprod = torch.sqrt(alphas_cumprod / (1.0 - alphas_cumprod))\n self.sqrt_recip_one_minus_alphas_cumprod = 1.0 / torch.sqrt(1. - alphas_cumprod)\n\n ## log calculation clipped because the posterior variance\n ## is 0 at the beginning of the diffusion chain\n self.register_buffer('posterior_log_variance_clipped',\n torch.log(torch.clamp(posterior_variance, min=1e-20)))\n self.register_buffer('posterior_mean_coef1',\n betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))\n self.register_buffer('posterior_mean_coef2',\n (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))\n\n ## get loss coefficients and initialize objective\n loss_weights = self.get_loss_weights(action_weight, loss_discount)\n self.loss_fn = Losses[loss_type](loss_weights, self.action_dim)\n\n # for guided sampling\n self.current_guidance = None\n\n #------------------------------------------ guidance utils ------------------------------------------#\n\n def set_guidance(self, guidance_config_list, example_batch=None):\n '''\n Instantiates test-time guidance functions using the list of configs (dicts) passed in.\n '''\n if guidance_config_list is not None:\n if len(guidance_config_list) > 0 and verify_guidance_config_list(guidance_config_list):\n print('Instantiating test-time guidance with configs:')\n print(guidance_config_list)\n self.current_guidance = DiffuserGuidance(guidance_config_list, example_batch)\n\n def update_guidance(self, **kwargs):\n if self.current_guidance is not None:\n self.current_guidance.update(**kwargs)\n\n def clear_guidance(self):\n self.current_guidance = None\n\n #------------------------------------------ utility ------------------------------------------#\n def _create_dynamics(self):\n if self._dynamics_type in [\"Unicycle\", dynamics.DynType.UNICYCLE]:\n self.dyn = dynamics.Unicycle(\n \"dynamics\",\n max_steer=self._dynamics_kwargs[\"max_steer\"],\n max_yawvel=self._dynamics_kwargs[\"max_yawvel\"],\n acce_bound=self._dynamics_kwargs[\"acce_bound\"]\n )\n else:\n self.dyn = None\n\n def get_aux_info(self, data_batch, include_class_free_cond=False):\n N = data_batch[\"history_positions\"].size(0)\n device = data_batch[\"history_positions\"].device\n\n cond_feat_in = torch.empty((N,0)).to(device)\n non_cond_feat_in = torch.empty((N,0)).to(device)\n\n #\n # current ego state\n #\n # always need this for rolling out actions\n if self._dynamics_type is not None:\n curr_states = batch_utils().get_current_states(data_batch, dyn_type=self.dyn.type())\n else:\n curr_states = None\n\n #\n # rasterized map\n #\n map_grid_feat = map_grid_feat_non_cond = raster_from_agent = None\n if self.map_encoder is not None:\n image_batch = data_batch[\"image\"]\n map_global_feat, map_grid_feat = self.map_encoder(image_batch)\n if self.use_map_feat_global:\n cond_feat_in = torch.cat([cond_feat_in, map_global_feat], dim=-1)\n if self.use_map_feat_grid and self.map_encoder is not None:\n raster_from_agent = data_batch[\"raster_from_agent\"]\n\n if include_class_free_cond:\n image_non_cond = torch.ones_like(image_batch) * self.cond_fill_value\n map_global_feat_non_cond, map_grid_feat_non_cond = self.map_encoder(image_non_cond)\n if self.use_map_feat_global:\n non_cond_feat_in = torch.cat([non_cond_feat_in, map_global_feat_non_cond], dim=-1)\n\n #\n # ego history\n #\n if self.agent_hist_encoder is not None:\n agent_hist_feat = self.agent_hist_encoder(data_batch[\"history_positions\"],\n data_batch[\"history_yaws\"],\n data_batch[\"history_speeds\"],\n data_batch[\"extent\"],\n data_batch[\"history_availabilities\"])\n cond_feat_in = torch.cat([cond_feat_in, agent_hist_feat], dim=-1)\n if include_class_free_cond:\n # make all agents zero availability\n non_cond_avail = torch.zeros_like(data_batch[\"history_speeds\"]).to(torch.bool) # BxT\n agent_hist_feat_non_cond = self.agent_hist_encoder(data_batch[\"history_positions\"],\n data_batch[\"history_yaws\"],\n data_batch[\"history_speeds\"],\n data_batch[\"extent\"],\n non_cond_avail)\n non_cond_feat_in = torch.cat([non_cond_feat_in, agent_hist_feat_non_cond], dim=-1)\n\n #\n # neighbor history\n #\n\n # neighbor 
trajectory encoding\n if self.neighbor_hist_encoder is not None:\n neighbor_hist_feat = self.neighbor_hist_encoder(data_batch[\"all_other_agents_history_positions\"],\n data_batch[\"all_other_agents_history_yaws\"],\n data_batch[\"all_other_agents_history_speeds\"],\n data_batch[\"all_other_agents_extents\"],\n data_batch[\"all_other_agents_history_availabilities\"])\n cond_feat_in = torch.cat([cond_feat_in, neighbor_hist_feat], dim=-1) \n if include_class_free_cond:\n # make all agents zero availability\n non_cond_neighbor_avail = torch.zeros_like(data_batch[\"all_other_agents_history_speeds\"]).to(torch.bool) # BxNxT\n neighbor_hist_feat_non_cond = self.neighbor_hist_encoder(data_batch[\"all_other_agents_history_positions\"],\n data_batch[\"all_other_agents_history_yaws\"],\n data_batch[\"all_other_agents_history_speeds\"],\n data_batch[\"all_other_agents_extents\"],\n non_cond_neighbor_avail)\n non_cond_feat_in = torch.cat([non_cond_feat_in, neighbor_hist_feat_non_cond], dim=-1)\n\n #\n # Process all features together\n #\n cond_feat = self.process_cond_mlp(cond_feat_in)\n non_cond_feat = None\n if include_class_free_cond:\n non_cond_feat = self.process_cond_mlp(non_cond_feat_in)\n\n aux_info = {\n 'cond_feat': cond_feat, \n 'curr_states': curr_states,\n }\n if include_class_free_cond:\n aux_info['non_cond_feat'] = non_cond_feat\n if self.use_map_feat_grid and self.map_encoder is not None:\n aux_info['map_grid_feat'] = map_grid_feat\n if include_class_free_cond:\n aux_info['map_grid_feat_non_cond'] = map_grid_feat_non_cond\n aux_info['raster_from_agent'] = raster_from_agent\n\n return aux_info\n\n def query_map_feats(self, x, map_grid_feat, raster_from_agent):\n '''\n - x : (B, T, D)\n - map_grid_feat : (B, C, H, W)\n - raster_from_agent: (B, 3, 3)\n '''\n B, T, _ = x.size()\n _, C, Hfeat, Wfeat = map_grid_feat.size()\n\n # unscale to agent coords\n pos_traj = self.descale_traj(x.detach())[:,:,:2]\n # convert to raster frame\n raster_pos_traj = transform_points_tensor(pos_traj, raster_from_agent)\n\n # scale to the feature map size\n _, H, W = self.input_image_shape\n xscale = Wfeat / W\n yscale = Hfeat / H\n raster_pos_traj[:,:,0] = raster_pos_traj[:,:,0] * xscale\n raster_pos_traj[:,:,1] = raster_pos_traj[:,:,1] * yscale\n\n # interpolate into feature grid\n feats_out = query_feature_grid(\n raster_pos_traj,\n map_grid_feat\n )\n feats_out = feats_out.reshape((B, T, -1))\n return feats_out\n\n def get_state_and_action_from_data_batch(self, data_batch, chosen_inds=[]):\n '''\n Extract state and(or) action from the data_batch from data_batch\n\n Input:\n data_batch: dict\n Output:\n x: (batch_size, num_steps, len(chosen_inds)).\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n\n # NOTE: for predicted agent, history and future with always be fully available\n traj_state = torch.cat(\n (data_batch[\"target_positions\"], data_batch[\"target_yaws\"]), dim=2)\n\n traj_state_and_action = convert_state_to_state_and_action(traj_state, data_batch[\"curr_speed\"], self.dt)\n\n return traj_state_and_action[..., chosen_inds]\n \n def convert_action_to_state_and_action(self, x_out, aux_info, scaled_input=True, descaled_output=False):\n '''\n Apply dynamics on input action trajectory to get state+action trajectory\n Input:\n x_out: (batch_size, num_steps, 2). scaled action trajectory\n Output:\n x_out: (batch_size, num_steps, 6). 
scaled state+action trajectory\n '''\n if scaled_input:\n x_out = self.descale_traj(x_out, [4, 5])\n \n x_out_state = unicyle_forward_dynamics(\n dyn_model=self.dyn,\n initial_states=aux_info['curr_states'],\n actions=x_out,\n step_time=self.dt,\n mode='parallel'\n )\n\n x_out_all = torch.cat([x_out_state, x_out], dim=-1)\n if scaled_input and not descaled_output:\n x_out_all = self.scale_traj(x_out_all, [0, 1, 2, 3, 4, 5])\n\n return x_out_all\n\n def scale_traj(self, target_traj_orig, chosen_inds=[]):\n '''\n - traj: B x T x D\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n add_coeffs = self.add_coeffs[chosen_inds][None,None] \n div_coeffs = self.div_coeffs[chosen_inds][None,None] \n\n device = target_traj_orig.get_device()\n dx_add = torch.tensor(add_coeffs, device=device)\n dx_div = torch.tensor(div_coeffs, device=device)\n\n target_traj = (target_traj_orig + dx_add) / dx_div\n\n return target_traj\n\n def descale_traj(self, target_traj_orig, chosen_inds=[]):\n '''\n - traj: B x T x D\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n add_coeffs = self.add_coeffs[chosen_inds][None,None] \n div_coeffs = self.div_coeffs[chosen_inds][None,None] \n\n device = target_traj_orig.get_device()\n dx_add = torch.tensor(add_coeffs, device=device)\n dx_div = torch.tensor(div_coeffs, device=device) \n\n target_traj = target_traj_orig * dx_div - dx_add\n\n return target_traj\n\n \n def forward(self, data_batch: Dict[str, torch.Tensor], num_samp=1,\n return_diffusion=False,\n return_guidance_losses=False,\n class_free_guide_w=0.0,\n apply_guidance=True,\n guide_clean=False) -> Dict[str, torch.Tensor]:\n use_class_free_guide = class_free_guide_w != 0.0\n aux_info = self.get_aux_info(data_batch, use_class_free_guide)\n \n cond_samp_out = self.conditional_sample(data_batch, \n horizon=None,\n aux_info=aux_info,\n return_diffusion=return_diffusion,\n return_guidance_losses=return_guidance_losses,\n num_samp=num_samp,\n class_free_guide_w=class_free_guide_w,\n apply_guidance=apply_guidance,\n guide_clean=guide_clean)\n traj_init = cond_samp_out['pred_traj']\n diff_init = guide_losses = None\n if return_diffusion:\n diff_init = cond_samp_out['diffusion']\n if return_guidance_losses:\n guide_losses = cond_samp_out['guide_losses']\n\n traj = self.descale_traj(traj_init)\n if diff_init is not None:\n diff_steps = self.descale_traj(diff_init)\n else:\n diff_steps = None\n\n if self.diffuser_input_mode in ['state_and_action']:\n traj = traj[..., [0, 1, 3]]\n else:\n raise\n\n pred_positions = traj[..., :2]\n pred_yaws = traj[..., 2:3]\n\n out_dict = {\n \"trajectories\": traj,\n \"predictions\": {\"positions\": pred_positions, \"yaws\": pred_yaws},\n }\n if diff_steps is not None:\n out_dict[\"predictions\"][\"diffusion_steps\"] = diff_steps\n if guide_losses is not None:\n out_dict[\"predictions\"][\"guide_losses\"] = guide_losses\n if self.dyn is not None:\n out_dict[\"curr_states\"] = aux_info['curr_states']\n\n return out_dict\n\n def compute_losses(self, data_batch):\n aux_info = self.get_aux_info(data_batch)\n target_traj = self.get_state_and_action_from_data_batch(data_batch) \n\n x = self.scale_traj(target_traj)\n \n diffusion_loss, _ = self.loss(x, aux_info=aux_info)\n losses = OrderedDict(\n diffusion_loss=diffusion_loss,\n )\n return losses\n\n def get_loss_weights(self, action_weight, discount):\n '''\n sets loss coefficients for trajectory\n\n action_weight : float\n coefficient on first action loss\n discount : float\n multiplies t^th timestep 
of trajectory loss by discount**t\n '''\n self.action_weight = action_weight\n\n dim_weights = torch.ones(self.transition_dim, dtype=torch.float32)\n\n ## decay loss with trajectory timestep: discount**t\n discounts = discount ** torch.arange(self.horizon, dtype=torch.float)\n discounts = discounts / discounts.mean()\n loss_weights = torch.einsum('h,t->ht', discounts, dim_weights)\n ## manually set a0 weight\n loss_weights[0, -self.action_dim:] = action_weight\n\n return loss_weights\n\n #------------------------------------------ sampling ------------------------------------------#\n def predict_start_from_noise(self, x_t, t, noise, force_noise=False):\n if force_noise:\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -\n extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise\n )\n else:\n return noise\n\n def predict_noise_from_start(self, x_t, t, x_start):\n return (\n extract(self.sqrt_recip_one_minus_alphas_cumprod.to(x_t.device), t, x_t.shape) * x_t -\n extract(self.sqrt_alphas_over_one_minus_alphas_cumprod.to(x_t.device), t, x_t.shape) * x_start\n )\n\n def q_posterior(self, x_start, x_t, t):\n posterior_mean = (\n extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +\n extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def p_mean_variance(self, x, t, aux_info={}, class_free_guide_w=0.0):\n t_inp = t\n\n x_model_in = x\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_model_in.detach(),\n aux_info['map_grid_feat'],\n aux_info['raster_from_agent'])\n x_model_in = torch.cat([x_model_in, map_feat_traj], dim=-1)\n\n model_prediction = self.model(x_model_in, aux_info['cond_feat'], t_inp)\n\n if self.diffuser_input_mode == 'state_and_action':\n x_tmp = x[..., 4:].detach()\n else:\n raise\n\n if class_free_guide_w != 0.0:\n # now run non-cond once\n x_model_non_cond_in = x\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_model_non_cond_in.detach(),\n aux_info['map_grid_feat_non_cond'],\n aux_info['raster_from_agent'])\n x_model_non_cond_in = torch.cat([x_model_non_cond_in, map_feat_traj], dim=-1)\n model_non_cond_prediction = self.model(x_model_non_cond_in, aux_info['non_cond_feat'], t_inp)\n\n # and combine to get actual model prediction (in noise space as in original paper)\n model_pred_noise = self.predict_noise_from_start(x_tmp, t=t, x_start=model_prediction)\n model_non_cond_pred_noise = self.predict_noise_from_start(x_tmp, t=t, x_start=model_non_cond_prediction)\n\n class_free_guide_noise = (1 + class_free_guide_w)*model_pred_noise - class_free_guide_w*model_non_cond_pred_noise\n\n model_prediction = self.predict_start_from_noise(x_tmp, t=t, noise=class_free_guide_noise, force_noise=True)\n\n x_recon = self.predict_start_from_noise(x_tmp, t=t, noise=model_prediction)\n \n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x_tmp, t=t)\n return model_mean, posterior_variance, posterior_log_variance, (x_recon, x_tmp, t)\n\n def guidance(self, x, data_batch, aux_info, num_samp=1,\n return_grad_of=None):\n '''\n estimate the gradient of rule 
reward w.r.t. the input trajectory\n Input:\n x: [batch_size*num_samp, time_steps, feature_dim]. scaled input trajectory.\n data_batch: additional info.\n aux_info: additional info.\n return_grad_of: which variable to take gradient of guidance loss wrt, if not given,\n takes wrt the input x.\n '''\n assert self.current_guidance is not None, 'Must instantiate guidance object before calling'\n bsize = int(x.size(0) / num_samp)\n num_t = x.size(1)\n with torch.enable_grad():\n # losses are applied on unscaled trajectories containing both states and actions\n if self.diffuser_input_mode in ['state_and_action']:\n # forward dynamics to get actions\n x_all = self.convert_action_to_state_and_action(x, aux_info, scaled_input=True, descaled_output=True)\n else:\n raise\n\n # compute losses and gradient\n x_loss = x_all.reshape((bsize, num_samp, num_t, 6))\n tot_loss, per_losses = self.current_guidance.compute_guidance_loss(x_loss, data_batch)\n # print(tot_loss)\n tot_loss.backward()\n guide_grad = x.grad if return_grad_of is None else return_grad_of.grad\n\n return guide_grad, per_losses\n\n @torch.no_grad()\n def p_sample(self, x, t, data_batch, aux_info={}, num_samp=1, class_free_guide_w=0.0, \n apply_guidance=True, guide_clean=False, eval_final_guide_loss=False):\n b, *_, device = *x.shape, x.device\n with_func = torch.no_grad\n if self.current_guidance is not None and apply_guidance and guide_clean:\n # will need to take grad wrt noisy input\n x = x.detach()\n x.requires_grad_()\n with_func = torch.enable_grad\n\n with with_func():\n # get prior mean and variance for next step\n model_mean, _, model_log_variance, q_posterior_in = self.p_mean_variance(x=x, t=t, aux_info=aux_info,\n class_free_guide_w=class_free_guide_w)\n\n # no noise or guidance when t == 0\n # i.e. use the mean of the distribution predicted at the final step rather than sampling.\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n noise = torch.randn_like(model_mean)\n sigma = (0.5 * model_log_variance).exp()\n\n # compute guidance\n guide_losses = None\n guide_grad = torch.zeros_like(model_mean)\n if self.current_guidance is not None and apply_guidance:\n if guide_clean:\n # want to guide the predicted clean traj from model, not the noisy one\n model_clean_pred = q_posterior_in[0]\n x_guidance = model_clean_pred\n return_grad_of = x\n else:\n x_guidance = model_mean.clone().detach()\n return_grad_of = x_guidance\n x_guidance.requires_grad_()\n\n guide_grad, guide_losses = self.guidance(x_guidance, data_batch, aux_info, num_samp=num_samp, return_grad_of=return_grad_of)\n\n if guide_clean and self.diffuser_input_mode == 'state_and_action':\n # only need the grad w.r.t noisy action\n guide_grad = guide_grad[..., [4,5]]\n\n # NOTE: empirally, scaling by the variance (sigma) seems to degrade results\n guide_grad = nonzero_mask * guide_grad #* sigma\n\n noise = nonzero_mask * sigma * noise\n\n if self.current_guidance is not None and guide_clean:\n # perturb clean trajectory\n guided_clean = q_posterior_in[0] - guide_grad\n # use the same noisy input again\n guided_x_t = q_posterior_in[1]\n # re-compute next step distribution with guided clean & noisy trajectories\n model_mean, _, _ = self.q_posterior(x_start=guided_clean,\n x_t=guided_x_t,\n t=q_posterior_in[2])\n # NOTE: variance is not dependent on x_start, so it won't change. 
Therefore, fine to use same noise.\n x_out = model_mean + noise\n else:\n x_out = model_mean - guide_grad + noise\n\n if self.current_guidance is not None and eval_final_guide_loss:\n # eval guidance loss one last time for filtering if desired\n # (even if not applied during sampling)\n _, guide_losses = self.guidance(x_out.clone().detach().requires_grad_(), data_batch, aux_info, num_samp=num_samp)\n \n # convert action to state+action\n if self.diffuser_input_mode == 'state_and_action':\n x_out = self.convert_action_to_state_and_action(x_out, aux_info)\n \n return x_out, guide_losses\n\n \n @torch.no_grad()\n def p_sample_loop(self, shape, data_batch, num_samp,\n aux_info={},\n return_diffusion=False,\n return_guidance_losses=False,\n class_free_guide_w=0.0,\n apply_guidance=True,\n guide_clean=False):\n device = self.betas.device\n\n batch_size = shape[0]\n # sample from base distribution\n x = torch.randn(shape, device=device) # (B, N, T, D)\n\n x = TensorUtils.join_dimensions(x, begin_axis=0, end_axis=2) # B*N, T, D\n aux_info = TensorUtils.repeat_by_expand_at(aux_info, repeats=num_samp, dim=0)\n\n if self.current_guidance is not None and not apply_guidance:\n print('DIFFUSER: Note, not using guidance during sampling, only evaluating guidance loss at very end...')\n\n # convert action to state+action\n if self.diffuser_input_mode == 'state_and_action':\n x = self.convert_action_to_state_and_action(x[..., [4, 5]], aux_info)\n\n if return_diffusion: diffusion = [x]\n\n stride = 1 # NOTE: different from training time if > 1\n steps = [i for i in reversed(range(0, self.n_timesteps, stride))]\n for i in steps:\n timesteps = torch.full((batch_size*num_samp,), i, device=device, dtype=torch.long)\n \n x, guide_losses = self.p_sample(x, timesteps, data_batch,\n aux_info=aux_info,\n num_samp=num_samp,\n class_free_guide_w=class_free_guide_w,\n apply_guidance=apply_guidance,\n guide_clean=guide_clean,\n eval_final_guide_loss=(i == steps[-1]))\n \n\n if return_diffusion: diffusion.append(x)\n\n if guide_losses is not None:\n print('===== GUIDANCE LOSSES ======')\n for k,v in guide_losses.items():\n print('%s: %.012f' % (k, np.nanmean(v.cpu())))\n\n x = TensorUtils.reshape_dimensions(x, begin_axis=0, end_axis=1, target_dims=(batch_size, num_samp))\n\n out_dict = {'pred_traj' : x}\n if return_guidance_losses:\n out_dict['guide_losses'] = guide_losses\n if return_diffusion:\n diffusion = [TensorUtils.reshape_dimensions(cur_diff, begin_axis=0, end_axis=1, target_dims=(batch_size, num_samp))\n for cur_diff in diffusion]\n out_dict['diffusion'] = torch.stack(diffusion, dim=3)\n\n return out_dict\n\n @torch.no_grad()\n def conditional_sample(self, data_batch, horizon=None, num_samp=1, class_free_guide_w=0.0, **kwargs):\n batch_size = data_batch['history_positions'].size()[0]\n horizon = horizon or self.horizon\n shape = (batch_size, num_samp, horizon, self.transition_dim)\n\n return self.p_sample_loop(shape, data_batch, num_samp, class_free_guide_w=class_free_guide_w, **kwargs)\n\n #------------------------------------------ training ------------------------------------------#\n\n def q_sample(self, x_start, t, noise): \n sample = (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise\n )\n return sample\n\n def p_losses(self, x_start_init, t, aux_info={}):\n noise_init = torch.randn_like(x_start_init)\n\n x_start = x_start_init\n noise = noise_init\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n t_inp = 
t\n\n if self.diffuser_input_mode == 'state_and_action':\n x_action_noisy = x_noisy[..., [4, 5]]\n x_noisy = self.convert_action_to_state_and_action(x_action_noisy, aux_info)\n\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_noisy.detach(),\n aux_info['map_grid_feat'],\n aux_info['raster_from_agent'])\n x_noisy = torch.cat([x_noisy, map_feat_traj], dim=-1)\n\n noise = self.model(x_noisy, aux_info['cond_feat'], t_inp)\n\n if self.diffuser_input_mode == 'state_and_action':\n x_recon_action = self.predict_start_from_noise(x_action_noisy, t=t, noise=noise)\n x_recon = self.convert_action_to_state_and_action(x_recon_action, aux_info)\n else:\n x_recon = self.predict_start_from_noise(x_noisy, t=t, noise=noise)\n\n # Note: we convert noise into x_start for loss estimation since we need to apply forward dynamics\n loss, info = self.loss_fn(x_recon, x_start)\n\n return loss, info\n\n def loss(self, x, aux_info={}):\n batch_size = len(x)\n t = torch.randint(0, self.n_timesteps, (batch_size,), device=x.device).long()\n \n return self.p_losses(x, t, aux_info=aux_info)"
},
{
"identifier": "EMA",
"path": "tbsim/models/trace_helpers.py",
"snippet": "class EMA():\n '''\n empirical moving average\n '''\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_model_average(self, ma_model, current_model):\n with torch.no_grad():\n ema_state_dict = ma_model.state_dict()\n for key, value in current_model.state_dict().items():\n ema_value = ema_state_dict[key]\n ema_value.copy_(self.beta * ema_value + (1. - self.beta) * value)"
},
{
"identifier": "choose_action_from_guidance",
"path": "tbsim/utils/guidance_loss.py",
"snippet": "def choose_action_from_guidance(preds, obs_dict, guide_configs, guide_losses):\n B, N, T, _ = preds[\"positions\"].size()\n # arbitrarily use the first sample as the action if no guidance given\n act_idx = torch.zeros((B), dtype=torch.long, device=preds[\"positions\"].device)\n # choose sample closest to desired guidance\n accum_guide_loss = torch.stack([v for k,v in guide_losses.items()], dim=2)\n # each scene separately since may contain different guidance\n scount = 0\n for sidx in range(len(guide_configs)):\n scene_guide_cfg = guide_configs[sidx]\n ends = scount + len(scene_guide_cfg)\n scene_guide_loss = accum_guide_loss[..., scount:ends]\n scount = ends\n scene_mask = ~torch.isnan(torch.sum(scene_guide_loss, dim=[1,2]))\n scene_guide_loss = scene_guide_loss[scene_mask].cpu()\n scene_guide_loss = torch.nansum(scene_guide_loss, dim=-1)\n is_scene_level = np.array([guide_cfg.name in ['agent_collision', 'social_group'] for guide_cfg in scene_guide_cfg])\n if np.sum(is_scene_level) > 0: \n # choose which sample minimizes at the scene level (where each sample is a \"scene\")\n scene_act_idx = torch.argmin(torch.sum(scene_guide_loss, dim=0))\n else:\n # each agent can choose the sample that minimizes guidance loss independently\n scene_act_idx = torch.argmin(scene_guide_loss, dim=-1)\n\n act_idx[scene_mask] = scene_act_idx.to(act_idx.device)\n\n return act_idx"
},
{
"identifier": "choose_action_from_gt",
"path": "tbsim/utils/guidance_loss.py",
"snippet": "def choose_action_from_gt(preds, obs_dict):\n B, N, T, _ = preds[\"positions\"].size()\n # arbitrarily use the first sample as the action if no gt given\n act_idx = torch.zeros((B), dtype=torch.long, device=preds[\"positions\"].device)\n if \"target_positions\" in obs_dict:\n print(\"DIFFUSER: WARNING using sample closest to GT from diffusion model!\")\n # use the sample closest to GT\n # pred and gt may not be the same if gt is missing data at the end\n endT = min(T, obs_dict[\"target_positions\"].size(1))\n pred_pos = preds[\"positions\"][:,:,:endT]\n gt_pos = obs_dict[\"target_positions\"][:,:endT].unsqueeze(1)\n gt_valid = obs_dict[\"target_availabilities\"][:,:endT].unsqueeze(1).expand((B, N, endT))\n err = torch.norm(pred_pos - gt_pos, dim=-1)\n err[~gt_valid] = torch.nan # so doesn't affect\n ade = torch.nanmean(err, dim=-1) # B x N\n res_valid = torch.sum(torch.isnan(ade), dim=-1) == 0\n if torch.sum(res_valid) > 0:\n min_ade_idx = torch.argmin(ade, dim=-1)\n act_idx[res_valid] = min_ade_idx[res_valid]\n else:\n print('Could not choose sample based on GT, as no GT in data')\n\n return act_idx"
}
] | import numpy as np
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import torch.nn.functional as F
import tbsim.utils.tensor_utils as TensorUtils
import tbsim.utils.metrics as Metrics
from tbsim.utils.batch_utils import batch_utils
from tbsim.policies.common import Action
from tbsim.models.trace import DiffuserModel
from tbsim.models.trace_helpers import EMA
from tbsim.utils.guidance_loss import choose_action_from_guidance, choose_action_from_gt | 10,371 |
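Editorial usage sketch, not part of this record's source: the `EMA` helper listed in the context above keeps an exponentially averaged copy of a model's weights; its constructor takes the decay `beta`, and `update_model_average(ma_model, current_model)` blends parameters in place. The `policy` name below is a placeholder for illustration only:

    import copy
    import torch.nn as nn
    from tbsim.models.trace_helpers import EMA

    policy = nn.Linear(4, 2)            # stand-in for the actual policy network
    ema = EMA(beta=0.995)               # decay factor for the moving average
    ema_policy = copy.deepcopy(policy)  # shadow copy holding the averaged weights
    # after each optimizer step: ema <- beta * ema + (1 - beta) * current
    ema.update_model_average(ema_policy, policy)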
class DiffuserTrafficModel(pl.LightningModule):
def __init__(self, algo_config, modality_shapes, guidance_config=None):
"""
Creates networks and places them into @self.nets.
"""
super(DiffuserTrafficModel, self).__init__()
self.algo_config = algo_config
self.nets = nn.ModuleDict()
if algo_config.diffuser_input_mode == 'state_and_action':
# "Observations" are inputs to diffuser that are not outputs
observation_dim = 4 # x, y, vel, yaw
# "Actions" are inputs and outputs
action_dim = 2 # acc, yawvel
# "output" is final output of the entired denoising process
output_dim = 2 # acc, yawvel
else:
raise
self.cond_drop_map_p = algo_config.conditioning_drop_map_p
self.cond_drop_neighbor_p = algo_config.conditioning_drop_neighbor_p
min_cond_drop_p = min([self.cond_drop_map_p, self.cond_drop_neighbor_p])
max_cond_drop_p = max([self.cond_drop_map_p, self.cond_drop_neighbor_p])
assert min_cond_drop_p >= 0.0 and max_cond_drop_p <= 1.0
self.use_cond = self.cond_drop_map_p < 1.0 and self.cond_drop_neighbor_p < 1.0 # no need for conditioning arch if always dropping
self.cond_fill_val = algo_config.conditioning_drop_fill
self.use_rasterized_map = algo_config.rasterized_map
if self.use_cond:
if self.cond_drop_map_p > 0:
print('DIFFUSER: Dropping map input conditioning with p = %f during training...' % (self.cond_drop_map_p))
if self.cond_drop_neighbor_p > 0:
print('DIFFUSER: Dropping neighbor traj input conditioning with p = %f during training...' % (self.cond_drop_neighbor_p))
self.nets["policy"] = DiffuserModel(
rasterized_map=algo_config.rasterized_map,
use_map_feat_global=algo_config.use_map_feat_global,
use_map_feat_grid=algo_config.use_map_feat_grid,
map_encoder_model_arch=algo_config.map_encoder_model_arch,
input_image_shape=modality_shapes["image"], # [C, H, W]
map_feature_dim=algo_config.map_feature_dim,
map_grid_feature_dim=algo_config.map_grid_feature_dim,
            hist_num_frames=algo_config.history_num_frames+1, # the current step is concatenated with the history
hist_feature_dim=algo_config.history_feature_dim,
cond_feature_dim=algo_config.cond_feat_dim,
diffuser_model_arch=algo_config.diffuser_model_arch,
horizon=algo_config.horizon,
observation_dim=observation_dim,
action_dim=action_dim,
output_dim=output_dim,
n_timesteps=algo_config.n_diffusion_steps,
loss_type=algo_config.loss_type,
action_weight=algo_config.action_weight,
loss_discount=algo_config.loss_discount,
dim_mults=algo_config.dim_mults,
dynamics_type=algo_config.dynamics.type,
dynamics_kwargs=algo_config.dynamics,
base_dim=algo_config.base_dim,
diffuser_input_mode=algo_config.diffuser_input_mode,
use_conditioning=self.use_cond,
cond_fill_value=self.cond_fill_val,
diffuser_norm_info=algo_config.diffuser_norm_info,
agent_hist_norm_info=algo_config.agent_hist_norm_info,
neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info,
dt=algo_config.step_time,
)
# set up initial guidance
if guidance_config is not None:
self.set_guidance(guidance_config)
# set up EMA
self.use_ema = algo_config.use_ema
if self.use_ema:
print('DIFFUSER: using EMA... val and get_action will use ema model')
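            # Hedged sketch: the EMA helper imported above is assumed to keep a shadow copy
            # of the policy weights and update it after each optimizer step roughly as
            #   ema_w = decay * ema_w + (1 - decay) * w
            # so that validation / get_action can run on the smoothed weights.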
|
class DiffuserTrafficModel(pl.LightningModule):
def __init__(self, algo_config, modality_shapes, guidance_config=None):
"""
Creates networks and places them into @self.nets.
"""
super(DiffuserTrafficModel, self).__init__()
self.algo_config = algo_config
self.nets = nn.ModuleDict()
if algo_config.diffuser_input_mode == 'state_and_action':
# "Observations" are inputs to diffuser that are not outputs
observation_dim = 4 # x, y, vel, yaw
# "Actions" are inputs and outputs
action_dim = 2 # acc, yawvel
            # "output" is the final output of the entire denoising process
output_dim = 2 # acc, yawvel
else:
            raise NotImplementedError(f"unsupported diffuser_input_mode: {algo_config.diffuser_input_mode}")
self.cond_drop_map_p = algo_config.conditioning_drop_map_p
self.cond_drop_neighbor_p = algo_config.conditioning_drop_neighbor_p
min_cond_drop_p = min([self.cond_drop_map_p, self.cond_drop_neighbor_p])
max_cond_drop_p = max([self.cond_drop_map_p, self.cond_drop_neighbor_p])
assert min_cond_drop_p >= 0.0 and max_cond_drop_p <= 1.0
self.use_cond = self.cond_drop_map_p < 1.0 and self.cond_drop_neighbor_p < 1.0 # no need for conditioning arch if always dropping
self.cond_fill_val = algo_config.conditioning_drop_fill
self.use_rasterized_map = algo_config.rasterized_map
if self.use_cond:
if self.cond_drop_map_p > 0:
print('DIFFUSER: Dropping map input conditioning with p = %f during training...' % (self.cond_drop_map_p))
if self.cond_drop_neighbor_p > 0:
print('DIFFUSER: Dropping neighbor traj input conditioning with p = %f during training...' % (self.cond_drop_neighbor_p))
self.nets["policy"] = DiffuserModel(
rasterized_map=algo_config.rasterized_map,
use_map_feat_global=algo_config.use_map_feat_global,
use_map_feat_grid=algo_config.use_map_feat_grid,
map_encoder_model_arch=algo_config.map_encoder_model_arch,
input_image_shape=modality_shapes["image"], # [C, H, W]
map_feature_dim=algo_config.map_feature_dim,
map_grid_feature_dim=algo_config.map_grid_feature_dim,
hist_num_frames=algo_config.history_num_frames+1, # the current step is concat to the history
hist_feature_dim=algo_config.history_feature_dim,
cond_feature_dim=algo_config.cond_feat_dim,
diffuser_model_arch=algo_config.diffuser_model_arch,
horizon=algo_config.horizon,
observation_dim=observation_dim,
action_dim=action_dim,
output_dim=output_dim,
n_timesteps=algo_config.n_diffusion_steps,
loss_type=algo_config.loss_type,
action_weight=algo_config.action_weight,
loss_discount=algo_config.loss_discount,
dim_mults=algo_config.dim_mults,
dynamics_type=algo_config.dynamics.type,
dynamics_kwargs=algo_config.dynamics,
base_dim=algo_config.base_dim,
diffuser_input_mode=algo_config.diffuser_input_mode,
use_conditioning=self.use_cond,
cond_fill_value=self.cond_fill_val,
diffuser_norm_info=algo_config.diffuser_norm_info,
agent_hist_norm_info=algo_config.agent_hist_norm_info,
neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info,
dt=algo_config.step_time,
)
# set up initial guidance
if guidance_config is not None:
self.set_guidance(guidance_config)
# set up EMA
self.use_ema = algo_config.use_ema
if self.use_ema:
print('DIFFUSER: using EMA... val and get_action will use ema model') | self.ema = EMA(algo_config.ema_decay) | 3 | 2023-10-31 18:43:07+00:00 | 12k |
nv-tlabs/pacer | uhc/smpllib/np_smpl_humanoid_batch.py | [
{
"identifier": "dict_to_torch",
"path": "uhc/utils/torch_ext.py",
"snippet": "def dict_to_torch(input_dict, dtype = None, device = None, add_dim = False):\n if not isinstance(input_dict, dict):\n return None\n out_dict = {}\n for key, value in input_dict.items():\n if isinstance(value, np.ndarray):\n value = torch.from_numpy(value)\n else:\n pass\n\n if torch.is_tensor(value):\n if dtype is not None:\n value = value.type(dtype)\n if device is not None:\n value = value.to(device)\n if add_dim:\n value = value[None, ]\n\n out_dict[key] = value\n\n return out_dict"
},
{
"identifier": "SMPLConverter",
"path": "uhc/smpllib/smpl_mujoco.py",
"snippet": "class SMPLConverter:\nclass SMPL_M_Renderer(object):\nclass SMPL_M_Viewer(object):\n def __init__(self, model, new_model, smpl_model=\"smpl\"):\n def qpos_smpl_2_new(self, qpos):\n def qvel_smpl_2_new(self, qpvel):\n def qpos_new_2_smpl(self, qpos):\n def qvel_new_2_smpl(self, qvel):\n def jpos_new_2_smpl(self, jpos):\n def get_new_qpos_lim(self):\n def get_new_qvel_lim(self):\n def get_new_body_lim(self):\n def get_new_diff_weight(self):\n def get_new_jkp(self):\n def get_new_jkd(self):\n def get_new_a_scale(self):\n def get_new_torque_limit(self):\n def __init__(\n self,\n model_file=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/humanoid_smpl_neutral_mesh.xml\",\n render_size=(960, 480),\n ):\n def render_smpl(\n self,\n body_pose,\n tran=None,\n output_name=None,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n ):\n def render_qpose_and_write(\n self,\n qpos,\n output_name=None,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n follow=False,\n ):\n def render_qpose(\n self,\n qpose,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n follow=False,\n ):\n def show_pose(self, size=(960, 480), loop=False):\n def set_smpl_pose(self, pose, tran=None, offset_z=0):\n def set_smpl_pose_6d(self, full_pose, tran=None, offset_z=0):\n def set_qpose(self, qpose):\n def show_pose_thread(self, return_img=False):\n def __init__(\n self,\n model_file=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/humanoid_smpl_neutral_mesh.xml\",\n render_size=(960, 480),\n ):\n def render_qpose(self, qpose, follow=False):\n def show_pose(self, return_img=False, size=(1920, 1080), loop=False):\n def show_pose_in_thread(self, return_img=False, size=(1920, 1080)):\n def show_pose_thread(self, return_img=False):\n def set_smpl_pose(self, pose, trans=None, offset_z=0):\n def set_smpl_pose_6d(self, full_pose, offset_z=0):\n def set_qpose(self, qpose):\ndef smplh_to_smpl(pose):\ndef smpl_to_smplh(pose):\ndef smpl_to_qpose(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_multi(\n pose,\n offset,\n mujoco_body_order,\n num_people=1,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_torch(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef qpos_to_smpl(qpos, mj_model, smpl_model=\"smpl\"):\ndef qpos_to_smpl_torch(qpos, mj_model, smpl_model=\"smpl\"):\ndef smpl_6d_to_qpose(full_pose, model, normalize=False):\ndef normalize_smpl_pose(pose_aa, trans=None, random_root=False):"
},
{
"identifier": "SMPL_EE_NAMES",
"path": "uhc/smpllib/smpl_parser.py",
"snippet": "SMPL_EE_NAMES = [\"L_Ankle\", \"R_Ankle\", \"L_Wrist\", \"R_Wrist\", \"Head\"]"
},
{
"identifier": "get_expert",
"path": "uhc/utils/tools.py",
"snippet": "def get_expert(expert_qpos, expert_meta, env):\n old_state = env.sim.get_state()\n expert = defaultdict(list)\n expert[\"qpos\"] = expert_qpos\n expert[\"meta\"] = expert_meta\n feat_keys = {\n \"qvel\",\n \"rlinv\",\n \"rlinv_local\",\n \"rangv\",\n \"rq_rmh\",\n \"com\",\n \"body_com\",\n \"head_pose\",\n \"ee_pos\",\n \"ee_wpos\",\n \"bquat\",\n \"bangvel\",\n \"wbpos\",\n \"wbquat\",\n }\n\n for i in range(expert_qpos.shape[0]):\n qpos = expert_qpos[i]\n env.data.qpos[:76] = qpos\n env.sim.forward()\n rq_rmh = de_heading(qpos[3:7])\n ee_pos = env.get_ee_pos(env.cc_cfg.obs_coord)\n wbpos = env.get_wbody_pos()\n wbquat = env.get_wbody_quat()\n\n ee_wpos = env.get_ee_pos(None)\n bquat = env.get_body_quat() # current pose (body) in quaternion\n com = env.get_com().copy()\n head_pose = env.get_head().copy()\n body_com = env.get_body_com()\n\n if i > 0:\n prev_qpos = expert_qpos[i - 1]\n qvel = get_qvel_fd_new(prev_qpos, qpos, env.dt)\n qvel = qvel.clip(-10.0, 10.0)\n rlinv = qvel[:3].copy()\n rlinv_local = transform_vec(\n qvel[:3].copy(), qpos[3:7], env.cc_cfg.obs_coord\n )\n rangv = qvel[3:6].copy()\n expert[\"qvel\"].append(qvel)\n expert[\"rlinv\"].append(rlinv)\n expert[\"rlinv_local\"].append(rlinv_local)\n expert[\"rangv\"].append(rangv)\n\n expert[\"wbquat\"].append(wbquat)\n expert[\"wbpos\"].append(wbpos)\n expert[\"ee_pos\"].append(ee_pos)\n expert[\"ee_wpos\"].append(ee_wpos)\n expert[\"bquat\"].append(bquat)\n expert[\"com\"].append(com)\n expert[\"body_com\"].append(body_com)\n expert[\"head_pose\"].append(head_pose)\n expert[\"rq_rmh\"].append(rq_rmh)\n\n expert[\"qvel\"].insert(0, expert[\"qvel\"][0].copy())\n expert[\"rlinv\"].insert(0, expert[\"rlinv\"][0].copy())\n expert[\"rlinv_local\"].insert(0, expert[\"rlinv_local\"][0].copy())\n expert[\"rangv\"].insert(0, expert[\"rangv\"][0].copy())\n # get expert body quaternions\n for i in range(1, expert_qpos.shape[0]):\n bangvel = get_angvel_fd(expert[\"bquat\"][i - 1], expert[\"bquat\"][i], env.dt)\n expert[\"bangvel\"].append(bangvel)\n expert[\"bangvel\"].insert(0, expert[\"bangvel\"][0].copy())\n\n for key in feat_keys:\n expert[key] = np.vstack(expert[key])\n\n expert[\"len\"] = expert[\"qpos\"].shape[0]\n expert[\"height_lb\"] = expert[\"qpos\"][:, 2].min()\n expert[\"head_height_lb\"] = expert[\"head_pose\"][:, 2].min()\n if expert_meta[\"cyclic\"]:\n expert[\"init_heading\"] = get_heading_q(expert_qpos[0, 3:7])\n expert[\"init_pos\"] = expert_qpos[0, :3].copy()\n env.sim.set_state(old_state)\n env.sim.forward()\n return expert"
},
{
"identifier": "get_expert_master",
"path": "uhc/utils/tools.py",
"snippet": "def get_expert_master(expert_qpos, expert_meta, env):\n old_state = env.sim.get_state()\n expert = defaultdict(list)\n expert_qpos = env.converter.qpos_smpl_2_new(expert_qpos)\n expert[\"qpos\"] = expert_qpos\n expert[\"meta\"] = expert_meta\n feat_keys = {\n \"qvel\",\n \"rlinv\",\n \"rlinv_local\",\n \"rangv\",\n \"rq_rmh\",\n \"com\",\n \"body_com\",\n \"head_pose\",\n \"ee_pos\",\n \"ee_wpos\",\n \"bquat\",\n \"bangvel\",\n \"wbpos\",\n \"wbquat\",\n }\n for i in range(expert_qpos.shape[0]):\n qpos = expert_qpos[i]\n env.data.qpos[: env.qpos_lim] = qpos\n env.sim.forward()\n rq_rmh = de_heading(qpos[3:7])\n ee_pos = env.get_ee_pos(env.cc_cfg.obs_coord)\n wbpos = env.get_wbody_pos()\n wbquat = env.get_wbody_quat()\n\n ee_wpos = env.get_ee_pos(None)\n bquat = env.get_body_quat() # current pose (body) in quaternion\n com = env.get_com()\n head_pose = env.get_head().copy()\n body_com = env.get_body_com()\n\n if i > 0:\n prev_qpos = expert_qpos[i - 1]\n qvel = get_qvel_fd_new(prev_qpos, qpos, env.dt)\n qvel = qvel.clip(-10.0, 10.0)\n rlinv = qvel[:3].copy()\n rlinv_local = transform_vec(\n qvel[:3].copy(), qpos[3:7], env.cc_cfg.obs_coord\n )\n rangv = qvel[3:6].copy()\n expert[\"qvel\"].append(qvel)\n expert[\"rlinv\"].append(rlinv)\n expert[\"rlinv_local\"].append(rlinv_local)\n expert[\"rangv\"].append(rangv)\n\n expert[\"wbquat\"].append(wbquat)\n expert[\"wbpos\"].append(wbpos)\n expert[\"ee_pos\"].append(ee_pos)\n expert[\"ee_wpos\"].append(ee_wpos)\n expert[\"bquat\"].append(bquat)\n expert[\"com\"].append(com)\n expert[\"body_com\"].append(body_com)\n expert[\"head_pose\"].append(head_pose)\n expert[\"rq_rmh\"].append(rq_rmh)\n\n expert[\"qvel\"].insert(0, expert[\"qvel\"][0].copy())\n expert[\"rlinv\"].insert(0, expert[\"rlinv\"][0].copy())\n expert[\"rlinv_local\"].insert(0, expert[\"rlinv_local\"][0].copy())\n expert[\"rangv\"].insert(0, expert[\"rangv\"][0].copy())\n # get expert body quaternions\n for i in range(1, expert_qpos.shape[0]):\n bangvel = get_angvel_fd(expert[\"bquat\"][i - 1], expert[\"bquat\"][i], env.dt)\n expert[\"bangvel\"].append(bangvel)\n expert[\"bangvel\"].insert(0, expert[\"bangvel\"][0].copy())\n\n for key in feat_keys:\n expert[key] = np.vstack(expert[key])\n\n expert[\"len\"] = expert[\"qpos\"].shape[0]\n expert[\"height_lb\"] = expert[\"qpos\"][:, 2].min()\n expert[\"head_height_lb\"] = expert[\"head_pose\"][:, 2].min()\n if expert_meta[\"cyclic\"]:\n expert[\"init_heading\"] = get_heading_q(expert_qpos[0, 3:7])\n expert[\"init_pos\"] = expert_qpos[0, :3].copy()\n env.sim.set_state(old_state)\n env.sim.forward()\n return expert"
},
{
"identifier": "SMPL_Parser",
"path": "uhc/smpllib/smpl_parser.py",
"snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n 
th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n parents_dict = {\n joint_names[i]: joint_names[parents[i]]\n for i in range(len(joint_names))\n }\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names,\n joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self,\n zero_pose=None,\n betas=torch.zeros(1, 10),\n flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(\n betas.shape[0], 1),\n th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]:\n (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:,\n c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n 
return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )"
},
{
"identifier": "SMPLH_Parser",
"path": "uhc/smpllib/smpl_parser.py",
"snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )"
},
{
"identifier": "SMPLX_Parser",
"path": "uhc/smpllib/smpl_parser.py",
"snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate(\n [np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n 
self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )"
}
] | import torch
import glob
import os
import sys
import pdb
import os.path as osp
import joblib
import pytorch3d.transforms as tR
import autograd.numpy as np
import time
import ipdb
from uhc.utils.torch_ext import dict_to_torch
from uhc.utils.torch_utils import *
from uhc.utils.transform_utils import *
from scipy.spatial.transform import Rotation as sRot
from uhc.smpllib.smpl_mujoco import SMPLConverter, smpl_to_qpose, smpl_to_qpose_torch, SMPL_BONE_ORDER_NAMES
from uhc.smpllib.smpl_parser import SMPL_EE_NAMES
from uhc.utils.tools import get_expert, get_expert_master
from uhc.smpllib.smpl_parser import (
SMPL_Parser,
SMPLH_Parser,
SMPLX_Parser,
)
from autograd import elementwise_grad as egrad
from uhc.smpllib.smpl_robot import Robot
from uhc.smpllib.torch_smpl_humanoid import Humanoid
from uhc.utils.config_utils.copycat_config import Config
from uhc.data_loaders.dataset_amass_single import DatasetAMASSSingle
from uhc.utils.torch_ext import dict_to_torch
from uhc.smpllib.smpl_mujoco import smpl_to_qpose_torch, smplh_to_smpl | 9,512 | # import numpy as np
sys.path.append(os.getcwd())
def smpl_op_to_op(pred_joints2d):
new_2d = np.concatenate([pred_joints2d[..., [1, 4], :].mean(axis = -2, keepdims = True), \
pred_joints2d[..., 1:7, :], \
pred_joints2d[..., [7, 8, 11], :].mean(axis = -2, keepdims = True), \
pred_joints2d[..., 9:11, :], \
pred_joints2d[..., 12:, :]], \
axis = -2)
return new_2d
def normalize_screen_coordinates(X, w=1920, h=1080):
assert X.shape[-1] == 2
# Normalize so that [0, w] is mapped to
# [-1, 1], while preserving the aspect ratio
return X / w * 2 - np.array([1, h / w])
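# Worked example for normalize_screen_coordinates (illustration only): with the default
# w=1920, h=1080, the image centre (960, 540) maps to (0.0, 0.0), the corner (0, 0) maps
# to (-1.0, -0.5625), and (1920, 1080) maps to (1.0, 0.5625).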
def rodrigues(r):
"""
    Rodrigues' rotation formula that turns an axis-angle vector into a rotation
    matrix in a batched manner.
Parameter:
----------
r: Axis-angle rotation vector of shape [batch_size, 1, 3].
Return:
-------
Rotation matrix of shape [batch_size, 3, 3].
"""
theta = np.linalg.norm(r, axis=(1, 2))[:, None, None]
# avoid zero divide
theta = np.maximum(theta, np.finfo(r.dtype).eps)
r_hat = r / theta
cos = np.cos(theta)
z_stick = np.zeros(theta.shape[0])
m = np.stack([
z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
-r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
],
axis=1).reshape([-1, 3, 3])
i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
[theta.shape[0], 3, 3])
A = np.transpose(r_hat, axes=[0, 2, 1])
B = r_hat
dot = np.matmul(A, B)
R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
return R
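# Illustrative usage of rodrigues (assumed call, added for clarity): the input is a batch
# of axis-angle vectors shaped [B, 1, 3] and the output is [B, 3, 3] rotation matrices, e.g.
#   r = np.zeros((4, 1, 3)); r[:, 0, 2] = np.linspace(0.1, np.pi, 4)  # rotations about z
#   R = rodrigues(r)  # R.shape == (4, 3, 3)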
def rodrigues_vec_to_rotation_mat(rot):
theta = np.linalg.norm(rot, axis=0)
if theta < sys.float_info.epsilon:
rotation_mat = np.eye(3, dtype=float)
else:
rot = rot / theta
I = np.eye(3, dtype=float)
r_rT = np.array([[rot[0] * rot[0], rot[0] * rot[1], rot[0] * rot[2]],
[rot[1] * rot[0], rot[1] * rot[1], rot[1] * rot[2]],
[rot[2] * rot[0], rot[2] * rot[1], rot[2] * rot[2]]])
r_cross = np.array([[0, -rot[2], rot[1]], [rot[2], 0, -rot[0]],
[-rot[1], rot[0], 0]])
rotation_mat = np.cos(theta) * I + (
1 - np.cos(theta)) * r_rT + np.sin(theta) * r_cross
return rotation_mat
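# Consistency note (expected behaviour, not asserted in this file): for any 3-vector rot,
# this single-vector variant should match scipy's rotation-vector conversion imported above,
#   np.allclose(rodrigues_vec_to_rotation_mat(rot), sRot.from_rotvec(rot).as_matrix())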
class Humanoid_Batch:
def __init__(self, smpl_model="smpl", data_dir="data/smpl"):
self.smpl_model = smpl_model
if self.smpl_model == "smpl":
self.smpl_parser_n = SMPL_Parser(model_path=data_dir,
gender="neutral")
self.smpl_parser_m = SMPL_Parser(model_path=data_dir,
gender="male")
self.smpl_parser_f = SMPL_Parser(model_path=data_dir,
gender="female")
elif self.smpl_model == "smplh":
self.smpl_parser_n = SMPLH_Parser(
model_path=data_dir,
gender="neutral",
use_pca=False,
create_transl=False,
)
self.smpl_parser_m = SMPLH_Parser(model_path=data_dir,
gender="male",
use_pca=False,
create_transl=False)
self.smpl_parser_f = SMPLH_Parser(model_path=data_dir,
gender="female",
use_pca=False,
create_transl=False)
elif self.smpl_model == "smplx":
| # import numpy as np
sys.path.append(os.getcwd())
def smpl_op_to_op(pred_joints2d):
new_2d = np.concatenate([pred_joints2d[..., [1, 4], :].mean(axis = -2, keepdims = True), \
pred_joints2d[..., 1:7, :], \
pred_joints2d[..., [7, 8, 11], :].mean(axis = -2, keepdims = True), \
pred_joints2d[..., 9:11, :], \
pred_joints2d[..., 12:, :]], \
axis = -2)
return new_2d
def normalize_screen_coordinates(X, w=1920, h=1080):
assert X.shape[-1] == 2
# Normalize so that [0, w] is mapped to
# [-1, 1], while preserving the aspect ratio
return X / w * 2 - np.array([1, h / w])
def rodrigues(r):
"""
    Rodrigues' rotation formula that turns an axis-angle vector into a rotation
    matrix in a batched manner.
Parameter:
----------
r: Axis-angle rotation vector of shape [batch_size, 1, 3].
Return:
-------
Rotation matrix of shape [batch_size, 3, 3].
"""
theta = np.linalg.norm(r, axis=(1, 2))[:, None, None]
# avoid zero divide
theta = np.maximum(theta, np.finfo(r.dtype).eps)
r_hat = r / theta
cos = np.cos(theta)
z_stick = np.zeros(theta.shape[0])
m = np.stack([
z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
-r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
],
axis=1).reshape([-1, 3, 3])
i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
[theta.shape[0], 3, 3])
A = np.transpose(r_hat, axes=[0, 2, 1])
B = r_hat
dot = np.matmul(A, B)
R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
return R
def rodrigues_vec_to_rotation_mat(rot):
theta = np.linalg.norm(rot, axis=0)
if theta < sys.float_info.epsilon:
rotation_mat = np.eye(3, dtype=float)
else:
rot = rot / theta
I = np.eye(3, dtype=float)
r_rT = np.array([[rot[0] * rot[0], rot[0] * rot[1], rot[0] * rot[2]],
[rot[1] * rot[0], rot[1] * rot[1], rot[1] * rot[2]],
[rot[2] * rot[0], rot[2] * rot[1], rot[2] * rot[2]]])
r_cross = np.array([[0, -rot[2], rot[1]], [rot[2], 0, -rot[0]],
[-rot[1], rot[0], 0]])
rotation_mat = np.cos(theta) * I + (
1 - np.cos(theta)) * r_rT + np.sin(theta) * r_cross
return rotation_mat
class Humanoid_Batch:
def __init__(self, smpl_model="smpl", data_dir="data/smpl"):
self.smpl_model = smpl_model
if self.smpl_model == "smpl":
self.smpl_parser_n = SMPL_Parser(model_path=data_dir,
gender="neutral")
self.smpl_parser_m = SMPL_Parser(model_path=data_dir,
gender="male")
self.smpl_parser_f = SMPL_Parser(model_path=data_dir,
gender="female")
elif self.smpl_model == "smplh":
self.smpl_parser_n = SMPLH_Parser(
model_path=data_dir,
gender="neutral",
use_pca=False,
create_transl=False,
)
self.smpl_parser_m = SMPLH_Parser(model_path=data_dir,
gender="male",
use_pca=False,
create_transl=False)
self.smpl_parser_f = SMPLH_Parser(model_path=data_dir,
gender="female",
use_pca=False,
create_transl=False)
elif self.smpl_model == "smplx": | self.smpl_parser_n = SMPLX_Parser( | 7 | 2023-10-31 20:47:12+00:00 | 12k |
Improbable-AI/dexenv | dexenv/envs/dclaw_base.py | [
{
"identifier": "VecTask",
"path": "dexenv/envs/base/vec_task.py",
"snippet": "class VecTask(Env):\n\n def __init__(self, config, sim_device, rl_device, graphics_device_id, headless):\n \"\"\"Initialise the `VecTask`.\n Args:\n config: config dictionary for the environment.\n sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'\n graphics_device_id: the device ID to render with.\n headless: Set to False to disable viewer rendering.\n \"\"\"\n super().__init__(config, sim_device, rl_device, graphics_device_id, headless)\n\n self.sim_params = self.__parse_sim_params(self.cfg[\"physics_engine\"], self.cfg[\"sim\"])\n if self.cfg[\"physics_engine\"] == \"physx\":\n self.physics_engine = gymapi.SIM_PHYSX\n elif self.cfg[\"physics_engine\"] == \"flex\":\n self.physics_engine = gymapi.SIM_FLEX\n else:\n msg = f\"Invalid physics engine backend: {self.cfg['physics_engine']}\"\n raise ValueError(msg)\n\n # optimization flags for pytorch JIT\n torch._C._jit_set_profiling_mode(False)\n torch._C._jit_set_profiling_executor(False)\n\n self.gym = gymapi.acquire_gym()\n\n self.first_randomization = True\n self.original_props = {}\n self.dr_randomizations = {}\n self.actor_params_generator = None\n self.extern_actor_params = {}\n self.last_step = -1\n self.last_rand_step = -1\n for env_id in range(self.num_envs):\n self.extern_actor_params[env_id] = None\n\n # create envs, sim and viewer\n self.sim_initialized = False\n self.create_sim()\n self.gym.prepare_sim(self.sim)\n self.sim_initialized = True\n\n self.set_viewer()\n self.allocate_buffers()\n\n self.obs_dict = {}\n\n def set_viewer(self):\n \"\"\"Create the viewer.\"\"\"\n\n # todo: read from config\n self.enable_viewer_sync = True\n self.viewer = None\n\n # if running with a viewer, set up keyboard shortcuts and camera\n if self.headless == False:\n # subscribe to keyboard shortcuts\n self.viewer = self.gym.create_viewer(\n self.sim, gymapi.CameraProperties())\n self.gym.subscribe_viewer_keyboard_event(\n self.viewer, gymapi.KEY_ESCAPE, \"QUIT\")\n self.gym.subscribe_viewer_keyboard_event(\n self.viewer, gymapi.KEY_V, \"toggle_viewer_sync\")\n\n # set the camera position based on up axis\n sim_params = self.gym.get_sim_params(self.sim)\n if sim_params.up_axis == gymapi.UP_AXIS_Z:\n cam_pos = gymapi.Vec3(20.0, 25.0, 3.0)\n cam_target = gymapi.Vec3(10.0, 15.0, 0.0)\n else:\n cam_pos = gymapi.Vec3(20.0, 3.0, 25.0)\n cam_target = gymapi.Vec3(10.0, 0.0, 15.0)\n\n self.gym.viewer_camera_look_at(\n self.viewer, None, cam_pos, cam_target)\n\n def allocate_buffers(self):\n \"\"\"Allocate the observation, states, etc. 
buffers.\n These are what is used to set observations and states in the environment classes which\n inherit from this one, and are read in `step` and other related functions.\n \"\"\"\n\n # allocate buffers\n self.allocate_ob_buffers()\n self.rew_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.float)\n self.done_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.reset_buf = torch.ones(\n self.num_envs, device=self.device, dtype=torch.long)\n self.timeout_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.progress_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.randomize_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.extras = {}\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n self.states_buf = torch.zeros(\n (self.num_envs, self.num_states), device=self.device, dtype=torch.float)\n\n #\n def set_sim_params_up_axis(self, sim_params: gymapi.SimParams, axis: str) -> int:\n \"\"\"Set gravity based on up axis and return axis index.\n Args:\n sim_params: sim params to modify the axis for.\n axis: axis to set sim params for.\n Returns:\n axis index for up axis.\n \"\"\"\n if axis == 'z':\n sim_params.up_axis = gymapi.UP_AXIS_Z\n sim_params.gravity.x = 0\n sim_params.gravity.y = 0\n sim_params.gravity.z = -9.81\n return 2\n return 1\n\n def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):\n \"\"\"Create an Isaac Gym sim object.\n Args:\n compute_device: ID of compute device to use.\n graphics_device: ID of graphics device to use.\n physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`)\n sim_params: sim params to use.\n Returns:\n the Isaac Gym sim object.\n \"\"\"\n sim = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params)\n if sim is None:\n print(\"*** Failed to create sim\")\n quit()\n\n return sim\n\n def get_state(self):\n \"\"\"Returns the state buffer of the environment (the priviledged observations for asymmetric training).\"\"\"\n return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n @abc.abstractmethod\n def pre_physics_step(self, actions: torch.Tensor):\n \"\"\"Apply the actions to the environment (eg by setting torques, position targets).\n Args:\n actions: the actions to apply\n \"\"\"\n\n @abc.abstractmethod\n def post_physics_step(self):\n \"\"\"Compute reward and observations, reset any environments that require it.\"\"\"\n\n def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:\n \"\"\"Step the physics of the environment.\n Args:\n actions: actions to apply\n Returns:\n Observations, rewards, resets, info\n Observations are dict of observations (currently only one member called 'obs')\n \"\"\"\n self.raw_actions_from_policy = actions.clone()\n # randomize actions\n if self.dr_randomizations.get('actions', None):\n actions = self.dr_randomizations['actions']['noise_lambda'](actions)\n action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)\n # apply actions\n self.pre_physics_step(action_tensor)\n\n # # step physics and render each frame\n for i in range(self.control_freq_inv):\n self.render()\n self.gym.simulate(self.sim)\n # to fix!\n if self.device == 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # fill time out 
buffer\n self.timeout_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.timeout_buf),\n torch.zeros_like(self.timeout_buf))\n\n # compute observations, rewards, resets, ...\n self.post_physics_step()\n\n self.extras[\"time_outs\"] = self.timeout_buf.to(self.rl_device)\n return self.update_obs(), self.rew_buf.to(self.rl_device), self.done_buf.to(self.rl_device), self.extras\n\n def update_obs(self):\n # randomize observations\n if self.dr_randomizations.get('observations', None):\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n # asymmetric actor-critic\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def zero_actions(self) -> torch.Tensor:\n \"\"\"Returns a buffer with zero actions.\n Returns:\n A buffer of zero torch actions\n \"\"\"\n actions = torch.zeros([self.num_envs, self.num_actions], dtype=torch.float32, device=self.device)\n\n return actions\n\n ## original code from Nvidia\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n\n # step the simulator\n self.step(zero_actions)\n\n return self.update_obs()\n\n def render(self):\n \"\"\"Draw the frame to the viewer, and check for keyboard events.\"\"\"\n if self.viewer:\n # check for window closed\n if self.gym.query_viewer_has_closed(self.viewer):\n sys.exit()\n\n # check for keyboard events\n for evt in self.gym.query_viewer_action_events(self.viewer):\n if evt.action == \"QUIT\" and evt.value > 0:\n sys.exit()\n elif evt.action == \"toggle_viewer_sync\" and evt.value > 0:\n self.enable_viewer_sync = not self.enable_viewer_sync\n\n # fetch results\n if self.device != 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # step graphics\n if self.enable_viewer_sync:\n self.gym.step_graphics(self.sim)\n self.gym.draw_viewer(self.viewer, self.sim, True)\n\n # Wait for dt to elapse in real time.\n # This synchronizes the physics simulation with the rendering rate.\n self.gym.sync_frame_time(self.sim)\n\n else:\n self.gym.poll_viewer_events(self.viewer)\n\n def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams:\n \"\"\"Parse the config dictionary for physics stepping settings.\n Args:\n physics_engine: which physics engine to use. 
\"physx\" or \"flex\"\n config_sim: dict of sim configuration parameters\n Returns\n IsaacGym SimParams object with updated settings.\n \"\"\"\n sim_params = gymapi.SimParams()\n\n # check correct up-axis\n if config_sim[\"up_axis\"] not in [\"z\", \"y\"]:\n msg = f\"Invalid physics up-axis: {config_sim['up_axis']}\"\n print(msg)\n raise ValueError(msg)\n\n # assign general sim parameters\n sim_params.dt = config_sim[\"dt\"]\n sim_params.num_client_threads = config_sim.get(\"num_client_threads\", 0)\n sim_params.use_gpu_pipeline = config_sim[\"use_gpu_pipeline\"]\n sim_params.substeps = config_sim.get(\"substeps\", 2)\n\n # assign up-axis\n if config_sim[\"up_axis\"] == \"z\":\n sim_params.up_axis = gymapi.UP_AXIS_Z\n else:\n sim_params.up_axis = gymapi.UP_AXIS_Y\n\n # assign gravity\n sim_params.gravity = gymapi.Vec3(*config_sim[\"gravity\"])\n\n # configure physics parameters\n if physics_engine == \"physx\":\n # set the parameters\n if \"physx\" in config_sim:\n for opt in config_sim[\"physx\"].keys():\n if opt == \"contact_collection\":\n setattr(sim_params.physx, opt, gymapi.ContactCollection(config_sim[\"physx\"][opt]))\n else:\n setattr(sim_params.physx, opt, config_sim[\"physx\"][opt])\n else:\n # set the parameters\n if \"flex\" in config_sim:\n for opt in config_sim[\"flex\"].keys():\n setattr(sim_params.flex, opt, config_sim[\"flex\"][opt])\n\n # return the configured params\n return sim_params\n\n \"\"\"\n Domain Randomization methods\n \"\"\"\n\n def get_actor_params_info(self, dr_params: Dict[str, Any], env):\n \"\"\"Generate a flat array of actor params, their names and ranges.\n Returns:\n The array\n \"\"\"\n\n if \"actor_params\" not in dr_params:\n return None\n params = []\n names = []\n lows = []\n highs = []\n param_getters_map = get_property_getter_map(self.gym)\n for actor, actor_properties in dr_params[\"actor_params\"].items():\n handle = self.gym.find_actor_handle(env, actor)\n for prop_name, prop_attrs in actor_properties.items():\n if prop_name in ['color', 'scale']:\n continue # this is set randomly\n props = param_getters_map[prop_name](env, handle)\n if not isinstance(props, list):\n props = [props]\n for prop_idx, prop in enumerate(props):\n for attr, attr_randomization_params in prop_attrs.items():\n name = prop_name + '_' + str(prop_idx) + '_' + attr\n lo_hi = attr_randomization_params['range']\n distr = attr_randomization_params['distribution']\n if 'uniform' not in distr:\n lo_hi = (-1.0 * float('Inf'), float('Inf'))\n if isinstance(prop, np.ndarray):\n for attr_idx in range(prop[attr].shape[0]):\n params.append(prop[attr][attr_idx])\n names.append(name + '_' + str(attr_idx))\n lows.append(lo_hi[0])\n highs.append(lo_hi[1])\n else:\n params.append(getattr(prop, attr))\n names.append(name)\n lows.append(lo_hi[0])\n highs.append(lo_hi[1])\n return params, names, lows, highs\n\n def apply_randomizations(self, dr_params):\n rand_freq = dr_params.get(\"frequency\", 1)\n self.last_step = self.gym.get_frame_count(self.sim)\n if self.first_randomization:\n do_nonenv_randomize = True\n env_ids = list(range(self.num_envs))\n else:\n do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq\n rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))\n rand_envs = torch.logical_and(rand_envs, self.reset_buf)\n env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()\n self.randomize_buf[rand_envs] = 0\n\n if do_nonenv_randomize:\n self.last_rand_step = 
self.last_step\n\n param_setters_map = get_property_setter_map(self.gym)\n param_setter_defaults_map = get_default_setter_args(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n\n # On first iteration, check the number of buckets\n if self.first_randomization:\n check_buckets(self.gym, self.envs, dr_params)\n\n for nonphysical_param in [\"observations\", \"actions\"]:\n if nonphysical_param in dr_params and do_nonenv_randomize:\n dist = dr_params[nonphysical_param][\"distribution\"]\n op_type = dr_params[nonphysical_param][\"operation\"]\n sched_type = dr_params[nonphysical_param][\"schedule\"] if \"schedule\" in dr_params[nonphysical_param] else None\n sched_step = dr_params[nonphysical_param][\"schedule_steps\"] if \"schedule\" in dr_params[nonphysical_param] else None\n op = operator.add if op_type == 'additive' else operator.mul\n\n if sched_type == 'linear':\n sched_scaling = 1.0 / sched_step * \\\n min(self.last_step, sched_step)\n elif sched_type == 'constant':\n sched_scaling = 0 if self.last_step < sched_step else 1\n else:\n sched_scaling = 1\n\n if dist == 'gaussian':\n mu, var = dr_params[nonphysical_param][\"range\"]\n mu_corr, var_corr = dr_params[nonphysical_param].get(\"range_correlated\", [0., 0.])\n\n if op_type == 'additive':\n mu *= sched_scaling\n var *= sched_scaling\n mu_corr *= sched_scaling\n var_corr *= sched_scaling\n elif op_type == 'scaling':\n var = var * sched_scaling # scale up var over time\n mu = mu * sched_scaling + 1.0 * \\\n (1.0 - sched_scaling) # linearly interpolate\n\n var_corr = var_corr * sched_scaling # scale up var over time\n mu_corr = mu_corr * sched_scaling + 1.0 * \\\n (1.0 - sched_scaling) # linearly interpolate\n\n def noise_lambda(tensor, param_name=nonphysical_param):\n params = self.dr_randomizations[param_name]\n corr = params.get('corr', None)\n if corr is None:\n corr = torch.randn_like(tensor)\n params['corr'] = corr\n corr = corr * params['var_corr'] + params['mu_corr']\n return op(\n tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])\n\n self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr,\n 'noise_lambda': noise_lambda}\n\n elif dist == 'uniform':\n lo, hi = dr_params[nonphysical_param][\"range\"]\n lo_corr, hi_corr = dr_params[nonphysical_param].get(\"range_correlated\", [0., 0.])\n\n if op_type == 'additive':\n lo *= sched_scaling\n hi *= sched_scaling\n lo_corr *= sched_scaling\n hi_corr *= sched_scaling\n elif op_type == 'scaling':\n lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)\n hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)\n lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)\n hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)\n\n def noise_lambda(tensor, param_name=nonphysical_param):\n params = self.dr_randomizations[param_name]\n corr = params.get('corr', None)\n if corr is None:\n corr = torch.randn_like(tensor)\n params['corr'] = corr\n corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']\n return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])\n\n self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr,\n 'noise_lambda': noise_lambda}\n\n if \"sim_params\" in dr_params and do_nonenv_randomize:\n prop_attrs = dr_params[\"sim_params\"]\n prop = self.gym.get_sim_params(self.sim)\n\n if self.first_randomization:\n self.original_props[\"sim_params\"] = {\n attr: getattr(prop, 
attr) for attr in dir(prop)}\n\n for attr, attr_randomization_params in prop_attrs.items():\n apply_random_samples(\n prop, self.original_props[\"sim_params\"], attr, attr_randomization_params, self.last_step)\n\n self.gym.set_sim_params(self.sim, prop)\n extern_offsets = {}\n if self.actor_params_generator is not None:\n for env_id in env_ids:\n self.extern_actor_params[env_id] = \\\n self.actor_params_generator.sample()\n extern_offsets[env_id] = 0\n\n for actor, actor_properties in dr_params[\"actor_params\"].items():\n for env_id in env_ids:\n self.original_props.setdefault(env_id, dict())\n env = self.envs[env_id]\n handle = self.gym.find_actor_handle(env, actor)\n self.original_props[env_id].setdefault(handle, dict())\n extern_sample = self.extern_actor_params[env_id]\n\n for prop_name, prop_attrs in actor_properties.items():\n if prop_name == 'color':\n num_bodies = self.gym.get_actor_rigid_body_count(\n env, handle)\n for n in range(num_bodies):\n self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,\n gymapi.Vec3(random.uniform(0, 1),\n random.uniform(0, 1),\n random.uniform(0, 1)))\n continue\n if prop_name == 'scale':\n setup_only = prop_attrs.get('setup_only', False)\n if (setup_only and not self.sim_initialized) or not setup_only:\n attr_randomization_params = prop_attrs\n sample = generate_random_samples(attr_randomization_params, 1,\n self.last_step, None)\n og_scale = 1\n if attr_randomization_params['operation'] == 'scaling':\n new_scale = og_scale * sample\n elif attr_randomization_params['operation'] == 'additive':\n new_scale = og_scale + sample\n self.gym.set_actor_scale(env, handle, new_scale)\n continue\n\n prop = param_getters_map[prop_name](env, handle)\n set_random_properties = True\n if isinstance(prop, list):\n if self.first_randomization:\n self.original_props[env_id][handle][prop_name] = [\n {attr: getattr(p, attr) for attr in dir(p)} for p in prop]\n for attr, attr_randomization_params in prop_attrs.items():\n same_for_all = attr_randomization_params.get('same_for_all', False)\n setup_only = attr_randomization_params.get('setup_only', False)\n attr_sample = None\n assert len(prop) == len(self.original_props[env_id][handle][prop_name])\n for p, og_p in zip(prop, self.original_props[env_id][handle][prop_name]):\n if (setup_only and not self.sim_initialized) or not setup_only:\n smpl = None\n if self.actor_params_generator is not None:\n smpl, extern_offsets[env_id] = get_attr_val_from_sample(\n extern_sample, extern_offsets[env_id], p, attr)\n if same_for_all and attr_sample is not None:\n apply_prop_samples(p, og_p, attr, attr_randomization_params, attr_sample)\n else:\n attr_sample = apply_random_samples(\n p, og_p, attr, attr_randomization_params,\n self.last_step, smpl)\n else:\n set_random_properties = False\n else:\n if self.first_randomization:\n self.original_props[env_id][handle][prop_name] = deepcopy(prop)\n for attr, attr_randomization_params in prop_attrs.items():\n setup_only = attr_randomization_params.get('setup_only', False)\n if (setup_only and not self.sim_initialized) or not setup_only:\n smpl = None\n if self.actor_params_generator is not None:\n smpl, extern_offsets[env_id] = get_attr_val_from_sample(\n extern_sample, extern_offsets[env_id], prop, attr)\n apply_random_samples(\n prop, self.original_props[env_id][handle][prop_name], attr,\n attr_randomization_params, self.last_step, smpl)\n else:\n set_random_properties = False\n if set_random_properties:\n setter = param_setters_map[prop_name]\n default_args = 
param_setter_defaults_map[prop_name]\n setter(env, handle, prop, *default_args)\n\n if self.actor_params_generator is not None:\n for env_id in env_ids: # check that we used all dims in sample\n if extern_offsets[env_id] > 0:\n extern_sample = self.extern_actor_params[env_id]\n if extern_offsets[env_id] != extern_sample.shape[0]:\n print('env_id', env_id,\n 'extern_offset', extern_offsets[env_id],\n 'vs extern_sample.shape', extern_sample.shape)\n raise Exception(\"Invalid extern_sample size\")\n self.first_randomization = False\n return env_ids"
},
{
"identifier": "compute_dclaw_reward",
"path": "dexenv/envs/rewards.py",
"snippet": "@torch.no_grad()\ndef compute_dclaw_reward(reset_buf, reset_goal_buf, progress_buf,\n successes, max_episode_length: float,\n object_pos, object_rot, target_pos, target_rot,\n reward_cfg, actions,\n fingertip_pos=None, fingertip_vel=None,\n object_linvel=None, object_angvel=None, dof_vel=None,\n dof_torque=None, table_cf=None\n ):\n rot_reward_scale = reward_cfg.rotRewardScale\n rot_eps = reward_cfg.rotEps\n reach_goal_bonus = reward_cfg.reachGoalBonus\n fall_dist = reward_cfg.fallDistance\n fall_penalty = reward_cfg.fallPenalty\n success_tolerance = reward_cfg.successTolerance\n ftip_reward_scale = reward_cfg.ftipRewardScale\n penalize_tb_contact = reward_cfg.pen_tb_contact\n kwargs = dict(\n reset_buf=reset_buf,\n reset_goal_buf=reset_goal_buf,\n progress_buf=progress_buf,\n successes=successes,\n max_episode_length=max_episode_length,\n object_pos=object_pos,\n object_rot=object_rot,\n target_pos=target_pos,\n target_rot=target_rot,\n actions=actions,\n fingertip_pos=fingertip_pos,\n object_linvel=object_linvel,\n object_angvel=object_angvel,\n dof_vel=dof_vel,\n dof_torque=dof_torque,\n rot_reward_scale=rot_reward_scale,\n rot_eps=rot_eps,\n reach_goal_bonus=reach_goal_bonus,\n fall_dist=fall_dist,\n fall_penalty=fall_penalty,\n success_tolerance=success_tolerance,\n ftip_reward_scale=ftip_reward_scale,\n energy_scale=reward_cfg.energy_scale,\n dof_vel_thresh=reward_cfg.dof_vel_thresh,\n obj_lin_vel_thresh=reward_cfg.obj_lin_vel_thresh,\n obj_ang_vel_thresh=reward_cfg.obj_ang_vel_thresh,\n action_norm_thresh=reward_cfg.action_norm_thresh,\n penalize_tb_contact=penalize_tb_contact,\n table_cf=table_cf if table_cf is not None else torch.ones(1),\n tb_cf_scale=reward_cfg.tb_cf_scale,\n clip_energy_reward=reward_cfg.clip_energy_reward,\n energy_upper_bound=reward_cfg.energy_upper_bound,\n )\n out = compute_reward(**kwargs)\n return out"
},
{
"identifier": "get_module_path",
"path": "dexenv/utils/common.py",
"snippet": "def get_module_path(module):\n modu = importlib.util.find_spec(module)\n return Path(list(modu.submodule_search_locations)[0])"
},
{
"identifier": "pathlib_file",
"path": "dexenv/utils/common.py",
"snippet": "def pathlib_file(file_name):\n if isinstance(file_name, str):\n file_name = Path(file_name)\n elif not isinstance(file_name, Path):\n raise TypeError(f'Please check the type of the filename:{file_name}')\n return file_name"
},
{
"identifier": "dclaw_body_color_mapping",
"path": "dexenv/utils/hand_color.py",
"snippet": "FINGERTIP_COLORS = np.array([\n [111, 29, 27],\n [187, 148, 87],\n [67, 40, 24],\n [153, 88, 42],\n [255, 230, 167]\n]) / 255.0\nFINGERTIP_COLORS = FINGERTIP_COLORS.tolist()"
},
{
"identifier": "get_camera_params",
"path": "dexenv/utils/isaac_utils.py",
"snippet": "def get_camera_params(width=640, height=480, hov=75, cuda=True):\n camera_props = gymapi.CameraProperties()\n camera_props.horizontal_fov = hov\n camera_props.width = width\n camera_props.height = height\n camera_props.enable_tensors = cuda\n return camera_props"
},
{
"identifier": "random_quaternions",
"path": "dexenv/utils/torch_utils.py",
"snippet": "@torch.no_grad()\ndef random_quaternions(num, dtype=None, device=None, order='xyzw'):\n \"\"\"\n return quaternions in [w, x, y, z] or [x, y, z, w]\n \"\"\"\n if PYTORCH3D_AVAILABLE:\n quats = py3d_rot_cvt.random_quaternions(num, dtype=dtype, device=device)\n else:\n \"\"\"\n http://planning.cs.uiuc.edu/node198.html\n \"\"\"\n ran = torch.rand(num, 3, dtype=dtype, device=device)\n r1, r2, r3 = ran[:, 0], ran[:, 1], ran[:, 2]\n pi2 = 2 * np.pi\n r1_1 = torch.sqrt(1.0 - r1)\n r1_2 = torch.sqrt(r1)\n t1 = pi2 * r2\n t2 = pi2 * r3\n\n quats = torch.zeros(num, 4, dtype=dtype, device=device)\n quats[:, 0] = r1_1 * (torch.sin(t1))\n quats[:, 1] = r1_1 * (torch.cos(t1))\n quats[:, 2] = r1_2 * (torch.sin(t2))\n quats[:, 3] = r1_2 * (torch.cos(t2))\n\n assert order in ['xyzw', 'wxyz']\n if order == 'xyzw':\n quats = quat_wxyz_to_xyzw(quats)\n return quats"
},
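Note on the "random_quaternions" snippet above: when pytorch3d is unavailable it falls back to subgroup-algorithm sampling, mapping three uniform variates onto a uniformly distributed unit quaternion. A minimal standalone sketch of that sampling scheme in plain PyTorch (illustrative names, not part of the dataset record), with a unit-norm check:

import numpy as np
import torch

def sample_uniform_quaternions(num: int) -> torch.Tensor:
    # Subgroup-algorithm sampling: the two scale factors sqrt(1 - u1) and sqrt(u1)
    # have squares summing to one, so every sample is a unit quaternion
    # (the component ordering convention is left to the caller).
    u = torch.rand(num, 3)
    u1, u2, u3 = u[:, 0], u[:, 1], u[:, 2]
    a, b = torch.sqrt(1.0 - u1), torch.sqrt(u1)
    t1, t2 = 2 * np.pi * u2, 2 * np.pi * u3
    return torch.stack(
        [a * torch.sin(t1), a * torch.cos(t1), b * torch.sin(t2), b * torch.cos(t2)],
        dim=-1,
    )

q = sample_uniform_quaternions(5)
print(torch.linalg.norm(q, dim=-1))  # ~1.0 for every sample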
{
"identifier": "torch_long",
"path": "dexenv/utils/torch_utils.py",
"snippet": "def torch_long(array, device='cpu'):\n if isinstance(array, torch.Tensor):\n return array.long().to(device)\n elif isinstance(array, np.ndarray):\n return torch.from_numpy(array).long().to(device)\n elif isinstance(array, list):\n return torch.LongTensor(array).to(device)\n elif isinstance(array, dict):\n new_dict = dict()\n for k, v in array.items():\n new_dict[k] = torch_long(v, device)\n return new_dict"
}
] | import time
import torch
import dexenv
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgym.gymutil import get_property_getter_map
from isaacgym.gymutil import get_property_setter_map
from isaacgymenvs.utils.torch_jit_utils import *
from loguru import logger
from dexenv.envs.base.vec_task import VecTask
from dexenv.envs.rewards import compute_dclaw_reward
from dexenv.utils.common import get_module_path
from dexenv.utils.common import pathlib_file
from dexenv.utils.hand_color import dclaw_body_color_mapping
from dexenv.utils.isaac_utils import get_camera_params
from dexenv.utils.torch_utils import random_quaternions
from dexenv.utils.torch_utils import torch_long | 10,786 | self.hand_start_states = []
self.hand_indices = []
self.fingertip_indices = []
self.object_indices = []
self.goal_object_indices = []
self.render_camera_handles = []
if self.cfg.rgb_render:
render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in
self.fingertips]
print(f'Fingertip handles:{self.fingertip_handles}')
dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)
object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset)
self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))
self.object_handles = []
max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1
max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1
for i in range(self.num_envs):
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
self.create_hand_actor(env_ptr=env_ptr,
dclaw_asset=dclaw_asset,
dclaw_start_pose=dclaw_start_pose,
dclaw_dof_props=dclaw_dof_props,
env_id=i)
object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 1)
self.object_handles.append(object_handle)
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,
object_start_pose.r.w,
0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices.append(object_idx)
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs,
0, 2)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block':
blockscale = float(self.cfg.env.blockscale)
self.gym.set_actor_scale(env_ptr, object_handle, blockscale)
self.gym.set_actor_scale(env_ptr, goal_handle, blockscale)
if self.object_type != "block":
self.gym.set_rigid_body_color(
env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
self.gym.set_rigid_body_color(
env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
if self.cfg.rgb_render:
render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
self.render_camera_handles.append(render_camera_handle[0])
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.setup_torch_states()
def create_camera(self, camera_poses, env_ptr, camera_params):
cam_handles = []
for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)):
camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params)
if isinstance(camera_poses[ic], tuple):
self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1])
else:
self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic])
cam_handles.append(camera_handle)
return cam_handles
def get_visual_render_camera_setup(self):
cam_pos = np.array([-0.7, 0, 0.5])
cam_focus_pt = np.array([0.08, 0, 0.15])
cam_focus_pt = gymapi.Vec3(*cam_focus_pt)
cam_pos = gymapi.Vec3(*cam_pos)
camera_poses = [(cam_pos, cam_focus_pt)]
camera_params = get_camera_params(width=self.cfg.cam.visual_render_width,
height=self.cfg.cam.visual_render_height,
hov=45,
cuda=False)
return camera_poses, camera_params
def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, dclaw_dof_props, env_id):
dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, "hand", env_id, 0, 0)
if self.cfg.env.dof_torque_on:
self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)
self.hand_start_states.append(
[dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z,
dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z,
dclaw_start_pose.r.w,
0, 0, 0, 0, 0, 0])
self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props)
hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM)
self.hand_indices.append(hand_idx)
self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL)
if self.obs_type == "full_state":
self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)
self.dclaws.append(dclaw_actor)
self.set_hand_color(env_ptr, dclaw_actor)
def set_hand_color(self, env_ptr, dclaw_actor):
rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor)
for bd, bd_id in rgd_dict.items():
|
class DClawBase(VecTask):
def __init__(self, cfg, sim_device, rl_device, graphics_device_id):
self.cfg = cfg
headless = self.cfg.headless
self.randomize = self.cfg["task"]["randomize"]
if self.randomize:
logger.warning(f'Domain randomization is enabled!')
self.randomization_params = self.cfg["task"]["randomization_params"]
self.aggregate_mode = self.cfg["env"]["aggregateMode"]
self.dist_reward_scale = self.cfg["env"]["rew"]["distRewardScale"]
self.rot_reward_scale = self.cfg["env"]["rew"]["rotRewardScale"]
self.success_tolerance = self.cfg["env"]["rew"]["successTolerance"]
self.reach_goal_bonus = self.cfg["env"]["rew"]["reachGoalBonus"]
self.fall_dist = self.cfg["env"]["rew"]["fallDistance"]
self.fall_penalty = self.cfg["env"]["rew"]["fallPenalty"]
self.rot_eps = self.cfg["env"]["rew"]["rotEps"]
self.vel_obs_scale = 0.2 # scale factor of velocity based observations
self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations
self.reset_position_noise = self.cfg["env"]["resetPositionNoise"]
self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"]
self.force_scale = self.cfg["env"].get("forceScale", 0.0)
self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1])
self.force_decay = self.cfg["env"].get("forceDecay", 0.99)
self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08)
self.dclaw_dof_speed_scale = self.cfg["env"]["dofSpeedScale"]
# self.act_moving_average = self.cfg["env"]["actionsMovingAverage"]
self.debug_viz = self.cfg["env"]["enableDebugVis"]
self.max_episode_length = self.cfg["env"]["episodeLength"]
self.reset_time = self.cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self.cfg["env"]["printNumSuccesses"]
self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self.cfg["env"].get("averFactor", 0.1)
self.object_type = self.cfg["env"]["objectType"]
self.asset_files_dict = {
"block": "urdf/objects/cube_multicolor.urdf",
"egg": "mjcf/open_ai_assets/hand/egg.xml",
"airplane": "single_objects/airplane/model.urdf",
'power_drill': 'single_objects/power_drill/model.urdf',
'mug': 'single_objects/mug/model.urdf',
'elephant': 'asymm/train/elephant/var_000/model.urdf',
'train': 'asymm/train/train/var_000/model.urdf',
'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf'
}
self.objs_in_isaacgym = ['block', 'egg']
if "asset" in self.cfg["env"]:
self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock",
self.asset_files_dict["block"])
self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg",
self.asset_files_dict["egg"])
self.obs_type = self.cfg["env"]["observationType"]
if not (self.obs_type in ["full_no_vel", "full", "full_state"]):
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]")
print("Obs type:", self.obs_type)
## TODO: change value here
self.num_obs_dict = {
"full_no_vel": 42,
"full": 87,
"full_state": 114
}
self.up_axis = 'z'
num_states = 0
self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type]
self.cfg["env"]["numStates"] = num_states
self.cfg["env"]["numActions"] = 12
self.hist_buf_reset_env_ids = None
super().__init__(config=self.cfg,
sim_device=sim_device,
rl_device=rl_device,
graphics_device_id=graphics_device_id,
headless=headless)
self.dt = self.sim_params.dt
control_freq_inv = self.cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
if self.viewer != None:
cam_pos = gymapi.Vec3(0.16, -0.5, 0.5)
cam_target = gymapi.Vec3(0.0, 0.0, 0.15)
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
if self.obs_type == "full_state":
sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)
dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)
self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs,
self.num_dclaw_dofs)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
if self.cfg.env.dof_torque_on:
self.gym.refresh_dof_force_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs]
self.dclaw_dof_pos = self.dclaw_dof_state[..., 0]
self.dclaw_dof_vel = self.dclaw_dof_state[..., 1]
if self.cfg.env.dof_torque_on:
self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1)
else:
self.dclaw_dof_torque = None
self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)
self.num_bodies = self.rigid_body_states.shape[1]
self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)
if self.cfg.env.rew.pen_tb_contact:
_net_cf = self.gym.acquire_net_contact_force_tensor(self.sim)
self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3)
table_handle = self.gym.find_actor_handle(self.envs[0], 'table')
self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0],
table_handle,
'table',
gymapi.DOMAIN_ENV)
logger.warning(f'Table body index:{self.table_body_index}')
self.table_contact_force = self.net_contact_force[:, self.table_body_index]
self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs
self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)
self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)
self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)
self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))
* torch.rand(self.num_envs, device=self.device) + torch.log(
self.force_prob_range[1]))
self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)
self.num_actions = self.num_dclaw_dofs
self.actions = self.zero_actions()
DClawBase.compute_observations(self)
self.num_observations = self.obs_buf.shape[-1]
self.cfg.env.numObservations = self.num_observations
self.create_ob_act_space()
def create_sim(self):
self.dt = self.cfg["sim"]["dt"]
self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
plane_params.distance = 0.1
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()
object_asset_file = self.asset_files_dict[self.object_type]
dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)
table_asset = self.get_table_asset()
table_pose = self.get_table_pose()
if self.obs_type == "full_state":
sensor_pose = gymapi.Transform()
for ft_handle in self.fingertip_handles:
self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)
if self.object_type in self.objs_in_isaacgym:
asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix()
else:
asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix()
object_asset_options = gymapi.AssetOptions()
if self.cfg.env.vhacd:
object_asset_options.convex_decomposition_from_submeshes = True
object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
object_asset_options.disable_gravity = True
goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)
dclaw_start_pose = self.get_dclaw_start_pose()
object_start_pose = self.get_object_start_pose(dclaw_start_pose)
goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)
self.dclaws = []
self.envs = []
self.object_init_state = []
self.hand_start_states = []
self.hand_indices = []
self.fingertip_indices = []
self.object_indices = []
self.goal_object_indices = []
self.render_camera_handles = []
if self.cfg.rgb_render:
render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()
self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in
self.fingertips]
print(f'Fingertip handles:{self.fingertip_handles}')
dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)
object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)
object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset)
self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))
self.object_handles = []
max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1
max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1
for i in range(self.num_envs):
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
if self.aggregate_mode >= 1:
self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)
self.create_hand_actor(env_ptr=env_ptr,
dclaw_asset=dclaw_asset,
dclaw_start_pose=dclaw_start_pose,
dclaw_dof_props=dclaw_dof_props,
env_id=i)
object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 1)
self.object_handles.append(object_handle)
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,
object_start_pose.r.w,
0, 0, 0, 0, 0, 0])
object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
self.object_indices.append(object_idx)
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs,
0, 2)
goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
self.goal_object_indices.append(goal_object_idx)
if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block':
blockscale = float(self.cfg.env.blockscale)
self.gym.set_actor_scale(env_ptr, object_handle, blockscale)
self.gym.set_actor_scale(env_ptr, goal_handle, blockscale)
if self.object_type != "block":
self.gym.set_rigid_body_color(
env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
self.gym.set_rigid_body_color(
env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))
table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
if self.cfg.rgb_render:
render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
self.render_camera_handles.append(render_camera_handle[0])
if self.aggregate_mode > 0:
self.gym.end_aggregate(env_ptr)
self.envs.append(env_ptr)
self.setup_torch_states()
def create_camera(self, camera_poses, env_ptr, camera_params):
cam_handles = []
for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)):
camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params)
if isinstance(camera_poses[ic], tuple):
self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1])
else:
self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic])
cam_handles.append(camera_handle)
return cam_handles
def get_visual_render_camera_setup(self):
cam_pos = np.array([-0.7, 0, 0.5])
cam_focus_pt = np.array([0.08, 0, 0.15])
cam_focus_pt = gymapi.Vec3(*cam_focus_pt)
cam_pos = gymapi.Vec3(*cam_pos)
camera_poses = [(cam_pos, cam_focus_pt)]
camera_params = get_camera_params(width=self.cfg.cam.visual_render_width,
height=self.cfg.cam.visual_render_height,
hov=45,
cuda=False)
return camera_poses, camera_params
def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, dclaw_dof_props, env_id):
dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, "hand", env_id, 0, 0)
if self.cfg.env.dof_torque_on:
self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)
self.hand_start_states.append(
[dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z,
dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z,
dclaw_start_pose.r.w,
0, 0, 0, 0, 0, 0])
self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props)
hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM)
self.hand_indices.append(hand_idx)
self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL)
if self.obs_type == "full_state":
self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)
self.dclaws.append(dclaw_actor)
self.set_hand_color(env_ptr, dclaw_actor)
def set_hand_color(self, env_ptr, dclaw_actor):
rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor)
for bd, bd_id in rgd_dict.items(): | if bd not in dclaw_body_color_mapping: | 4 | 2023-10-25 17:22:41+00:00 | 12k |
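Note on the entry above: the `random_force_prob` expression interpolates between the logs of the two `forceProbRange` bounds and exponentiates, i.e. it samples log-uniformly between the bounds. A small sketch of the same pattern with illustrative bounds:

import torch

low, high = 0.001, 0.1                          # illustrative forceProbRange bounds
u = torch.rand(8)
log_low, log_high = torch.log(torch.tensor(low)), torch.log(torch.tensor(high))
samples = torch.exp((log_low - log_high) * u + log_high)
print(samples)                                  # every value lies between low and high

u = 0 recovers the upper bound and u = 1 the lower bound, so the samples are spread evenly in log space rather than linearly.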
ai-safety-foundation/sparse_autoencoder | sparse_autoencoder/autoencoder/model.py | [
{
"identifier": "LinearEncoder",
"path": "sparse_autoencoder/autoencoder/components/linear_encoder.py",
"snippet": "class LinearEncoder(Module):\n r\"\"\"Linear encoder layer.\n\n Linear encoder layer (essentially `nn.Linear`, with a ReLU activation function). Designed to be\n used as the encoder in a sparse autoencoder (excluding any outer tied bias).\n\n $$\n \\begin{align*}\n m &= \\text{learned features dimension} \\\\\n n &= \\text{input and output dimension} \\\\\n b &= \\text{batch items dimension} \\\\\n \\overline{\\mathbf{x}} \\in \\mathbb{R}^{b \\times n} &= \\text{input after tied bias} \\\\\n W_e \\in \\mathbb{R}^{m \\times n} &= \\text{weight matrix} \\\\\n b_e \\in \\mathbb{R}^{m} &= \\text{bias vector} \\\\\n f &= \\text{ReLU}(\\overline{\\mathbf{x}} W_e^T + b_e) = \\text{LinearEncoder output}\n \\end{align*}\n $$\n \"\"\"\n\n _learnt_features: int\n \"\"\"Number of learnt features (inputs to this layer).\"\"\"\n\n _input_features: int\n \"\"\"Number of input features from the source model.\"\"\"\n\n _n_components: int | None\n\n weight: Float[\n Parameter,\n Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),\n ]\n \"\"\"Weight parameter.\n\n Each row in the weights matrix acts as a dictionary vector, representing a single basis\n element in the learned activation space.\n \"\"\"\n\n bias: Float[Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)]\n \"\"\"Bias parameter.\"\"\"\n\n @property\n def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:\n \"\"\"Reset optimizer parameter details.\n\n Details of the parameters that should be reset in the optimizer, when resetting\n dictionary vectors.\n\n Returns:\n List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to\n reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.\n \"\"\"\n return [\n ResetOptimizerParameterDetails(parameter=self.weight, axis=-2),\n ResetOptimizerParameterDetails(parameter=self.bias, axis=-1),\n ]\n\n activation_function: ReLU\n \"\"\"Activation function.\"\"\"\n\n @validate_call\n def __init__(\n self,\n input_features: PositiveInt,\n learnt_features: PositiveInt,\n n_components: PositiveInt | None,\n ):\n \"\"\"Initialize the linear encoder layer.\n\n Args:\n input_features: Number of input features to the autoencoder.\n learnt_features: Number of learnt features in the autoencoder.\n n_components: Number of source model components the SAE is trained on.\n \"\"\"\n super().__init__()\n\n self._learnt_features = learnt_features\n self._input_features = input_features\n self._n_components = n_components\n\n self.weight = Parameter(\n torch.empty(\n shape_with_optional_dimensions(n_components, learnt_features, input_features),\n )\n )\n self.bias = Parameter(\n torch.zeros(shape_with_optional_dimensions(n_components, learnt_features))\n )\n self.activation_function = ReLU()\n\n self.reset_parameters()\n\n def reset_parameters(self) -> None:\n \"\"\"Initialize or reset the parameters.\"\"\"\n # Assumes we are using ReLU activation function (for e.g. 
leaky ReLU, the `a` parameter and\n # `nonlinerity` must be changed.\n init.kaiming_uniform_(self.weight, nonlinearity=\"relu\")\n\n # Bias (approach from nn.Linear)\n fan_in = self.weight.size(1)\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)]:\n \"\"\"Forward pass.\n\n Args:\n x: Input tensor.\n\n Returns:\n Output of the forward pass.\n \"\"\"\n z = (\n einops.einsum(\n x,\n self.weight,\n f\"{Axis.BATCH} ... {Axis.INPUT_OUTPUT_FEATURE}, \\\n ... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE} \\\n -> {Axis.BATCH} ... {Axis.LEARNT_FEATURE}\",\n )\n + self.bias\n )\n\n return self.activation_function(z)\n\n @final\n def update_dictionary_vectors(\n self,\n dictionary_vector_indices: Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)],\n updated_dictionary_weights: Float[\n Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX, Axis.INPUT_OUTPUT_FEATURE)\n ],\n component_idx: int | None = None,\n ) -> None:\n \"\"\"Update encoder dictionary vectors.\n\n Updates the dictionary vectors (columns in the weight matrix) with the given values.\n\n Args:\n dictionary_vector_indices: Indices of the dictionary vectors to update.\n updated_dictionary_weights: Updated weights for just these dictionary vectors.\n component_idx: Component index to update.\n\n Raises:\n ValueError: If there are multiple components and `component_idx` is not specified.\n \"\"\"\n if dictionary_vector_indices.numel() == 0:\n return\n\n with torch.no_grad():\n if component_idx is None:\n if self._n_components is not None:\n error_message = \"component_idx must be specified when n_components is not None\"\n raise ValueError(error_message)\n\n self.weight[dictionary_vector_indices] = updated_dictionary_weights\n else:\n self.weight[component_idx, dictionary_vector_indices] = updated_dictionary_weights\n\n @final\n def update_bias(\n self,\n update_parameter_indices: Int64[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)\n ],\n updated_bias_features: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)\n ],\n component_idx: int | None = None,\n ) -> None:\n \"\"\"Update encoder bias.\n\n Args:\n update_parameter_indices: Indices of the bias features to update.\n updated_bias_features: Updated bias features for just these indices.\n component_idx: Component index to update.\n\n Raises:\n ValueError: If there are multiple components and `component_idx` is not specified.\n \"\"\"\n if update_parameter_indices.numel() == 0:\n return\n\n with torch.no_grad():\n if component_idx is None:\n if self._n_components is not None:\n error_message = \"component_idx must be specified when n_components is not None\"\n raise ValueError(error_message)\n\n self.bias[update_parameter_indices] = updated_bias_features\n else:\n self.bias[component_idx, update_parameter_indices] = updated_bias_features\n\n def extra_repr(self) -> str:\n \"\"\"String extra representation of the module.\"\"\"\n return (\n f\"input_features={self._input_features}, \"\n f\"learnt_features={self._learnt_features}, \"\n f\"n_components={self._n_components}\"\n )"
},
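Note on the "LinearEncoder" snippet above: the forward pass is f = ReLU(x_bar @ W_e.T + b_e). A minimal sketch with made-up sizes and no component axis (plain PyTorch, not the package's own class):

import torch

batch, n_in, m_learnt = 4, 16, 64
x_bar = torch.randn(batch, n_in)                 # input after the tied pre-encoder bias
W_e = torch.empty(m_learnt, n_in)
torch.nn.init.kaiming_uniform_(W_e, nonlinearity="relu")
b_e = torch.zeros(m_learnt)

f = torch.relu(x_bar @ W_e.T + b_e)              # learned (sparse) activations
print(f.shape)                                   # torch.Size([4, 64])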
{
"identifier": "TiedBias",
"path": "sparse_autoencoder/autoencoder/components/tied_bias.py",
"snippet": "class TiedBias(Module):\n \"\"\"Tied Bias Layer.\n\n The tied pre-encoder bias is a learned bias term that is subtracted from the input before\n encoding, and added back after decoding.\n\n The bias parameter must be initialised in the parent module, and then passed to this layer.\n\n https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-bias\n \"\"\"\n\n _bias_position: TiedBiasPosition\n\n _bias_reference: Float[\n Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n\n @property\n def bias(\n self,\n ) -> Float[Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Bias.\"\"\"\n return self._bias_reference\n\n def __init__(\n self,\n bias_reference: Float[\n Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n position: TiedBiasPosition,\n ) -> None:\n \"\"\"Initialize the bias layer.\n\n Args:\n bias_reference: Tied bias parameter (initialised in the parent module), used for both\n the pre-encoder and post-encoder bias. The original paper initialised this using the\n geometric median of the dataset.\n position: Whether this is the pre-encoder or post-encoder bias.\n \"\"\"\n super().__init__()\n\n self._bias_reference = bias_reference\n\n # Support string literals as well as enums\n self._bias_position = position\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Forward Pass.\n\n Args:\n x: Input tensor.\n\n Returns:\n Output of the forward pass.\n \"\"\"\n # If this is the pre-encoder bias, we subtract the bias from the input.\n if self._bias_position == TiedBiasPosition.PRE_ENCODER:\n return x - self.bias\n\n # If it's the post-encoder bias, we add the bias to the input.\n return x + self.bias\n\n def extra_repr(self) -> str:\n \"\"\"String extra representation of the module.\"\"\"\n return f\"position={self._bias_position.value}\""
},
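Note on the "TiedBias" snippet above: a single shared bias vector is subtracted before the encoder and added back after the decoder. A tiny illustration with assumed shapes; with a real encoder/decoder in between the input is of course not reproduced exactly, the point is only that one parameter serves both positions:

import torch

bias = torch.randn(16)                  # the single tied bias parameter
x = torch.randn(4, 16)
pre_encoder = x - bias                  # TiedBiasPosition.PRE_ENCODER
post_decoder = pre_encoder + bias       # TiedBiasPosition.POST_DECODER
assert torch.allclose(post_decoder, x)  # identity path recovers the input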
{
"identifier": "TiedBiasPosition",
"path": "sparse_autoencoder/autoencoder/components/tied_bias.py",
"snippet": "class TiedBiasPosition(str, Enum):\n \"\"\"Tied Bias Position.\"\"\"\n\n PRE_ENCODER = \"pre_encoder\"\n POST_DECODER = \"post_decoder\""
},
{
"identifier": "UnitNormDecoder",
"path": "sparse_autoencoder/autoencoder/components/unit_norm_decoder.py",
"snippet": "class UnitNormDecoder(Module):\n r\"\"\"Constrained unit norm linear decoder layer.\n\n Linear layer decoder, where the dictionary vectors (columns of the weight matrix) are\n constrained to have unit norm. This is done by removing the gradient information parallel to the\n dictionary vectors before applying the gradient step, using a backward hook. It also requires\n `constrain_weights_unit_norm` to be called after each gradient step, to prevent drift of the\n dictionary vectors away from unit norm (as optimisers such as Adam don't strictly follow the\n gradient, but instead follow a modified gradient that includes momentum).\n\n $$ \\begin{align*}\n m &= \\text{learned features dimension} \\\\\n n &= \\text{input and output dimension} \\\\\n b &= \\text{batch items dimension} \\\\\n f \\in \\mathbb{R}^{b \\times m} &= \\text{encoder output} \\\\\n W_d \\in \\mathbb{R}^{n \\times m} &= \\text{weight matrix} \\\\\n z \\in \\mathbb{R}^{b \\times m} &= f W_d^T = \\text{UnitNormDecoder output (pre-tied bias)}\n \\end{align*} $$\n\n Motivation:\n Normalisation of the columns (dictionary features) prevents the model from reducing the\n sparsity loss term by increasing the size of the feature vectors in $W_d$.\n\n Note that the *Towards Monosemanticity: Decomposing Language Models With Dictionary\n Learning* paper found that removing the gradient information parallel to the dictionary\n vectors before applying the gradient step, rather than resetting the dictionary vectors to\n unit norm after each gradient step, results in a small but real reduction in total\n loss](https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-optimization).\n \"\"\"\n\n _learnt_features: int\n \"\"\"Number of learnt features (inputs to this layer).\"\"\"\n\n _decoded_features: int\n \"\"\"Number of decoded features (outputs from this layer).\"\"\"\n\n _n_components: int | None\n\n weight: Float[\n Parameter,\n Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE),\n ]\n \"\"\"Weight parameter.\n\n Each column in the weights matrix acts as a dictionary vector, representing a single basis\n element in the learned activation space.\n \"\"\"\n\n @property\n def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:\n \"\"\"Reset optimizer parameter details.\n\n Details of the parameters that should be reset in the optimizer, when resetting\n dictionary vectors.\n\n Returns:\n List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to\n reset (e.g. 
encoder.weight), and `axis` is the axis of the parameter to reset.\n \"\"\"\n return [ResetOptimizerParameterDetails(parameter=self.weight, axis=-1)]\n\n @validate_call\n def __init__(\n self,\n learnt_features: PositiveInt,\n decoded_features: PositiveInt,\n n_components: PositiveInt | None,\n *,\n enable_gradient_hook: bool = True,\n ) -> None:\n \"\"\"Initialize the constrained unit norm linear layer.\n\n Args:\n learnt_features: Number of learnt features in the autoencoder.\n decoded_features: Number of decoded (output) features in the autoencoder.\n n_components: Number of source model components the SAE is trained on.\n enable_gradient_hook: Enable the gradient backwards hook (modify the gradient before\n applying the gradient step, to maintain unit norm of the dictionary vectors).\n \"\"\"\n super().__init__()\n\n self._learnt_features = learnt_features\n self._decoded_features = decoded_features\n self._n_components = n_components\n\n # Create the linear layer as per the standard PyTorch linear layer\n self.weight = Parameter(\n torch.empty(\n shape_with_optional_dimensions(n_components, decoded_features, learnt_features),\n )\n )\n self.reset_parameters()\n\n # Register backward hook to remove any gradient information parallel to the dictionary\n # vectors (columns of the weight matrix) before applying the gradient step.\n if enable_gradient_hook:\n self.weight.register_hook(self._weight_backward_hook)\n\n def update_dictionary_vectors(\n self,\n dictionary_vector_indices: Int64[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)\n ],\n updated_weights: Float[\n Tensor,\n Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE_IDX),\n ],\n component_idx: int | None = None,\n ) -> None:\n \"\"\"Update decoder dictionary vectors.\n\n Updates the dictionary vectors (rows in the weight matrix) with the given values. Typically\n this is used when resampling neurons (dictionary vectors) that have died.\n\n Args:\n dictionary_vector_indices: Indices of the dictionary vectors to update.\n updated_weights: Updated weights for just these dictionary vectors.\n component_idx: Component index to update.\n\n Raises:\n ValueError: If `component_idx` is not specified when `n_components` is not None.\n \"\"\"\n if dictionary_vector_indices.numel() == 0:\n return\n\n with torch.no_grad():\n if component_idx is None:\n if self._n_components is not None:\n error_message = \"component_idx must be specified when n_components is not None\"\n raise ValueError(error_message)\n\n self.weight[:, dictionary_vector_indices] = updated_weights\n else:\n self.weight[component_idx, :, dictionary_vector_indices] = updated_weights\n\n def constrain_weights_unit_norm(self) -> None:\n \"\"\"Constrain the weights to have unit norm.\n\n Warning:\n Note this must be called after each gradient step. This is because optimisers such as\n Adam don't strictly follow the gradient, but instead follow a modified gradient that\n includes momentum. This means that the gradient step can change the norm of the\n dictionary vectors, even when the hook `_weight_backward_hook` is applied.\n\n Note this can't be applied directly in the backward hook, as it would interfere with a\n variety of use cases (e.g. 
gradient accumulation across mini-batches, concurrency issues\n with asynchronous operations, etc).\n\n Example:\n >>> import torch\n >>> layer = UnitNormDecoder(3, 3, None)\n >>> layer.weight.data = torch.ones((3, 3)) * 10\n >>> layer.constrain_weights_unit_norm()\n >>> column_norms = torch.sqrt(torch.sum(layer.weight ** 2, dim=0))\n >>> column_norms.round(decimals=3).tolist()\n [1.0, 1.0, 1.0]\n\n \"\"\"\n with torch.no_grad():\n torch.nn.functional.normalize(self.weight, dim=-2, out=self.weight)\n\n def reset_parameters(self) -> None:\n \"\"\"Initialize or reset the parameters.\n\n Example:\n >>> import torch\n >>> # Create a layer with 4 columns (learnt features) and 3 rows (decoded features)\n >>> layer = UnitNormDecoder(learnt_features=4, decoded_features=3, n_components=None)\n >>> layer.reset_parameters()\n >>> # Get the norm across the rows (by summing across the columns)\n >>> column_norms = torch.sum(layer.weight ** 2, dim=0)\n >>> column_norms.round(decimals=3).tolist()\n [1.0, 1.0, 1.0, 1.0]\n\n \"\"\"\n # Initialize the weights with a normal distribution. Note we don't use e.g. kaiming\n # normalisation here, since we immediately scale the weights to have unit norm (so the\n # initial standard deviation doesn't matter). Note also that `init.normal_` is in place.\n self.weight: Float[\n Parameter,\n Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),\n ] = init.normal_(self.weight, mean=0, std=1) # type: ignore\n\n # Scale so that each row has unit norm\n self.constrain_weights_unit_norm()\n\n def _weight_backward_hook(\n self,\n grad: Float[\n Tensor,\n Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),\n ],\n ) -> Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ]:\n r\"\"\"Unit norm backward hook.\n\n By subtracting the projection of the gradient onto the dictionary vectors, we remove the\n component of the gradient that is parallel to the dictionary vectors and just keep the\n component that is orthogonal to the dictionary vectors (i.e. moving around the hypersphere).\n The result is that the backward pass does not change the norm of the dictionary vectors.\n\n $$\n \\begin{align*}\n W_d &\\in \\mathbb{R}^{n \\times m} = \\text{Decoder weight matrix} \\\\\n g &\\in \\mathbb{R}^{n \\times m} = \\text{Gradient w.r.t. } W_d\n \\text{ from the backward pass} \\\\\n W_{d, \\text{norm}} &= \\frac{W_d}{\\|W_d\\|} = \\text{Normalized decoder weight matrix\n (over columns)} \\\\\n g_{\\parallel} &\\in \\mathbb{R}^{n \\times m} = \\text{Component of } g\n \\text{ parallel to } W_{d, \\text{norm}} \\\\\n g_{\\perp} &\\in \\mathbb{R}^{n \\times m} = \\text{Component of } g \\text{ orthogonal to }\n W_{d, \\text{norm}} \\\\\n g_{\\parallel} &= W_{d, \\text{norm}} \\cdot (W_{d, \\text{norm}}^\\top \\cdot g) \\\\\n g_{\\perp} &= g - g_{\\parallel} =\n \\text{Adjusted gradient with parallel component removed} \\\\\n \\end{align*}\n $$\n\n Args:\n grad: Gradient with respect to the weights.\n\n Returns:\n Gradient with respect to the weights, with the component parallel to the dictionary\n vectors removed.\n \"\"\"\n # Project the gradients onto the dictionary vectors. Intuitively the dictionary vectors can\n # be thought of as vectors that end on the circumference of a hypersphere. The projection of\n # the gradient onto the dictionary vectors is the component of the gradient that is parallel\n # to the dictionary vectors, i.e. 
the component that moves to or from the center of the\n # hypersphere.\n normalized_weight: Float[\n Tensor,\n Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),\n ] = self.weight / torch.norm(self.weight, dim=-2, keepdim=True)\n\n scalar_projections = einops.einsum(\n grad,\n normalized_weight,\n f\"... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE}, \\\n ... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE} \\\n -> ... {Axis.INPUT_OUTPUT_FEATURE}\",\n )\n\n projection = einops.einsum(\n scalar_projections,\n normalized_weight,\n f\"... {Axis.INPUT_OUTPUT_FEATURE}, \\\n ... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE} \\\n -> ... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE}\",\n )\n\n # Subtracting the parallel component from the gradient leaves only the component that is\n # orthogonal to the dictionary vectors, i.e. the component that moves around the surface of\n # the hypersphere.\n return grad - projection\n\n def forward(\n self, x: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)]\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Forward pass.\n\n Args:\n x: Input tensor.\n\n Returns:\n Output of the forward pass.\n \"\"\"\n return einops.einsum(\n x,\n self.weight,\n f\"{Axis.BATCH} ... {Axis.LEARNT_FEATURE}, \\\n ... {Axis.INPUT_OUTPUT_FEATURE} {Axis.LEARNT_FEATURE} \\\n -> {Axis.BATCH} ... {Axis.INPUT_OUTPUT_FEATURE}\",\n )\n\n def extra_repr(self) -> str:\n \"\"\"String extra representation of the module.\"\"\"\n return (\n f\"learnt_features={self._learnt_features}, \"\n f\"decoded_features={self._decoded_features}, \"\n f\"n_components={self._n_components}\"\n )"
},
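Note on the "UnitNormDecoder" snippet above: the backward hook subtracts the gradient component parallel to each dictionary column, so a first-order gradient step cannot change the column norms. A short numerical check of that projection (assumed shapes, plain PyTorch):

import torch

n, m = 6, 4                                                   # decoded features x learnt features
W = torch.nn.functional.normalize(torch.randn(n, m), dim=0)   # unit-norm columns
g = torch.randn(n, m)                                         # stand-in for dL/dW

scalar_proj = (W * g).sum(dim=0, keepdim=True)                # per-column <w_hat, g>
g_perp = g - W * scalar_proj                                  # parallel component removed

print((W * g_perp).sum(dim=0))                                # ~0: no radial component remains

Optimisers with momentum still drift the norms slightly, which is why the snippet also calls constrain_weights_unit_norm after each optimizer step.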
{
"identifier": "ResetOptimizerParameterDetails",
"path": "sparse_autoencoder/autoencoder/types.py",
"snippet": "class ResetOptimizerParameterDetails(NamedTuple):\n \"\"\"Reset Optimizer Parameter Details.\n\n Details of a parameter that should be reset in the optimizer, when resetting\n it's corresponding dictionary vectors.\n \"\"\"\n\n parameter: Parameter\n \"\"\"Parameter to reset.\"\"\"\n\n axis: int\n \"\"\"Axis of the parameter to reset.\"\"\""
},
{
"identifier": "Axis",
"path": "sparse_autoencoder/tensor_types.py",
"snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)"
},
{
"identifier": "shape_with_optional_dimensions",
"path": "sparse_autoencoder/utils/tensor_shape.py",
"snippet": "def shape_with_optional_dimensions(*shape: int | None) -> tuple[int, ...]:\n \"\"\"Create a shape from a tuple of optional dimensions.\n\n Motivation:\n By default PyTorch tensor shapes will error if you set an axis to `None`. This allows\n you to set that size and then the resulting output simply removes that axis.\n\n Examples:\n >>> shape_with_optional_dimensions(1, 2, 3)\n (1, 2, 3)\n\n >>> shape_with_optional_dimensions(1, None, 3)\n (1, 3)\n\n >>> shape_with_optional_dimensions(1, None, None)\n (1,)\n\n >>> shape_with_optional_dimensions(None, None, None)\n ()\n\n Args:\n *shape: Axis sizes, with `None` representing an optional axis.\n\n Returns:\n Axis sizes.\n \"\"\"\n return tuple(dimension for dimension in shape if dimension is not None)"
}
] | from pathlib import Path
from tempfile import gettempdir
from typing import NamedTuple
from huggingface_hub import HfApi, hf_hub_download
from jaxtyping import Float
from pydantic import (
BaseModel,
DirectoryPath,
NonNegativeInt,
PositiveInt,
validate_call,
)
from torch import Tensor
from torch.nn import Module, Parameter
from torch.serialization import FILE_LIKE
from sparse_autoencoder.autoencoder.components.linear_encoder import LinearEncoder
from sparse_autoencoder.autoencoder.components.tied_bias import TiedBias, TiedBiasPosition
from sparse_autoencoder.autoencoder.components.unit_norm_decoder import UnitNormDecoder
from sparse_autoencoder.autoencoder.types import ResetOptimizerParameterDetails
from sparse_autoencoder.tensor_types import Axis
from sparse_autoencoder.utils.tensor_shape import shape_with_optional_dimensions
import torch
import wandb | 7,329 | """The Sparse Autoencoder Model."""
class SparseAutoencoderConfig(BaseModel, frozen=True):
"""SAE model config."""
n_input_features: PositiveInt
"""Number of input features.
E.g. `d_mlp` if training on MLP activations from TransformerLens.
"""
n_learned_features: PositiveInt
"""Number of learned features.
The initial paper experimented with 1 to 256 times the number of input features, and primarily
used a multiple of 8."""
n_components: PositiveInt | None = None
"""Number of source model components the SAE is trained on.""
This is useful if you want to train the SAE on several components of the source model at once.
If `None`, the SAE is assumed to be trained on just one component (in this case the model won't
contain a component axis in any of the parameters).
"""
class SparseAutoencoderState(BaseModel, arbitrary_types_allowed=True):
"""SAE model state.
Used for saving and loading the model.
"""
config: SparseAutoencoderConfig
"""Model config."""
state_dict: dict[str, Tensor]
"""Model state dict."""
class ForwardPassResult(NamedTuple):
"""SAE model forward pass result."""
learned_activations: Float[
Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)
]
decoded_activations: Float[
Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
DEFAULT_TMP_DIR = Path(gettempdir()) / "sparse_autoencoder"
class SparseAutoencoder(Module):
"""Sparse Autoencoder Model."""
config: SparseAutoencoderConfig
"""Model config."""
geometric_median_dataset: Float[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
"""Estimated Geometric Median of the Dataset.
Used for initialising :attr:`tied_bias`.
"""
tied_bias: Float[
Parameter, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
"""Tied Bias Parameter.
The same bias is used pre-encoder and post-decoder.
"""
pre_encoder_bias: TiedBias
"""Pre-Encoder Bias."""
encoder: LinearEncoder
"""Encoder."""
decoder: UnitNormDecoder
"""Decoder."""
post_decoder_bias: TiedBias
"""Post-Decoder Bias."""
def __init__(
self,
config: SparseAutoencoderConfig,
geometric_median_dataset: Float[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
| None = None,
) -> None:
"""Initialize the Sparse Autoencoder Model.
Args:
config: Model config.
geometric_median_dataset: Estimated geometric median of the dataset.
"""
super().__init__()
self.config = config
# Store the geometric median of the dataset (so that we can reset parameters). This is not a
# parameter itself (the tied bias parameter is used for that), so gradients are disabled.
| """The Sparse Autoencoder Model."""
class SparseAutoencoderConfig(BaseModel, frozen=True):
"""SAE model config."""
n_input_features: PositiveInt
"""Number of input features.
E.g. `d_mlp` if training on MLP activations from TransformerLens.
"""
n_learned_features: PositiveInt
"""Number of learned features.
The initial paper experimented with 1 to 256 times the number of input features, and primarily
used a multiple of 8."""
n_components: PositiveInt | None = None
"""Number of source model components the SAE is trained on.""
This is useful if you want to train the SAE on several components of the source model at once.
If `None`, the SAE is assumed to be trained on just one component (in this case the model won't
contain a component axis in any of the parameters).
"""
class SparseAutoencoderState(BaseModel, arbitrary_types_allowed=True):
"""SAE model state.
Used for saving and loading the model.
"""
config: SparseAutoencoderConfig
"""Model config."""
state_dict: dict[str, Tensor]
"""Model state dict."""
class ForwardPassResult(NamedTuple):
"""SAE model forward pass result."""
learned_activations: Float[
Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)
]
decoded_activations: Float[
Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
DEFAULT_TMP_DIR = Path(gettempdir()) / "sparse_autoencoder"
class SparseAutoencoder(Module):
"""Sparse Autoencoder Model."""
config: SparseAutoencoderConfig
"""Model config."""
geometric_median_dataset: Float[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
"""Estimated Geometric Median of the Dataset.
Used for initialising :attr:`tied_bias`.
"""
tied_bias: Float[
Parameter, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
"""Tied Bias Parameter.
The same bias is used pre-encoder and post-decoder.
"""
pre_encoder_bias: TiedBias
"""Pre-Encoder Bias."""
encoder: LinearEncoder
"""Encoder."""
decoder: UnitNormDecoder
"""Decoder."""
post_decoder_bias: TiedBias
"""Post-Decoder Bias."""
def __init__(
self,
config: SparseAutoencoderConfig,
geometric_median_dataset: Float[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
| None = None,
) -> None:
"""Initialize the Sparse Autoencoder Model.
Args:
config: Model config.
geometric_median_dataset: Estimated geometric median of the dataset.
"""
super().__init__()
self.config = config
# Store the geometric median of the dataset (so that we can reset parameters). This is not a
# parameter itself (the tied bias parameter is used for that), so gradients are disabled. | tied_bias_shape = shape_with_optional_dimensions( | 6 | 2023-10-27 07:37:15+00:00 | 12k |
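Note on the entry above: `geometric_median_dataset` is only used to initialise the tied bias. A hedged sketch of one standard estimator for a geometric median (Weiszfeld iteration); this is not the library's own routine and the names are illustrative:

import torch

def geometric_median(points: torch.Tensor, iters: int = 50, eps: float = 1e-8) -> torch.Tensor:
    # Weiszfeld iteration: re-weight points by inverse distance to the current
    # estimate; the fixed point minimises the summed Euclidean distances.
    guess = points.mean(dim=0)
    for _ in range(iters):
        dists = torch.norm(points - guess, dim=1).clamp_min(eps)
        weights = 1.0 / dists
        guess = (weights[:, None] * points).sum(dim=0) / weights.sum()
    return guess

activations = torch.randn(1024, 16)
print(geometric_median(activations).shape)   # torch.Size([16])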
LeapLabTHU/FamO2O | jax_cql/JaxCQL/conservative_sac_main.py | [
{
"identifier": "ConservativeSAC",
"path": "jax_cql/JaxCQL/conservative_sac.py",
"snippet": "class ConservativeSAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 1.0\n config.use_automatic_entropy_tuning = True\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n config.qf_lr = 3e-4\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n config.use_cql = True\n config.cql_n_actions = 10\n config.cql_importance_sample = True\n config.cql_lagrange = False\n config.cql_target_action_gap = 1.0\n config.cql_temp = 1.0\n config.cql_min_q_weight = 5.0\n config.cql_max_target_backup = False\n config.cql_clip_diff_min = -np.inf\n config.cql_clip_diff_max = np.inf\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n self.log_alpha = Scalar(0.0)\n self._train_states['log_alpha'] = TrainState.create(\n params=self.log_alpha.init(next_rng()),\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha')\n\n if self.config.cql_lagrange:\n self.log_alpha_prime = Scalar(1.0)\n self._train_states['log_alpha_prime'] = TrainState.create(\n params=self.log_alpha_prime.init(next_rng()),\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha_prime')\n\n self._model_keys = tuple(model_keys)\n self._total_steps = 0\n\n def train(self, batch, use_cql, bc=False):\n self._total_steps += 1\n self._train_states, self._target_qf_params, metrics = self._train_step(\n self._train_states, self._target_qf_params, next_rng(), batch, use_cql, bc\n )\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'use_cql', 'bc'))\n def _train_step(self, train_states, target_qf_params, rng, batch, use_cql, bc=False):\n rng_generator = JaxRNG(rng)\n\n def loss_fn(train_params):\n observations = batch['observations']\n actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n\n loss_collection = {}\n\n @wrap_function_with_rng(rng_generator())\n def forward_policy(rng, *args, **kwargs):\n return self.policy.apply(\n *args, **kwargs,\n 
rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_qf(rng, *args, **kwargs):\n return self.qf.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.qf.rng_keys())\n )\n\n new_actions, log_pi = forward_policy(train_params['policy'], observations)\n\n if self.config.use_automatic_entropy_tuning:\n alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (\n log_pi + self.config.target_entropy).mean()\n loss_collection['log_alpha'] = alpha_loss\n alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier\n else:\n alpha_loss = 0.0\n alpha = self.config.alpha_multiplier\n\n \"\"\" Policy loss \"\"\"\n if bc:\n log_probs = forward_policy(train_params['policy'], observations, actions, method=self.policy.log_prob)\n policy_loss = (alpha * log_pi - log_probs).mean()\n else:\n q_new_actions = jnp.minimum(\n forward_qf(train_params['qf1'], observations, new_actions),\n forward_qf(train_params['qf2'], observations, new_actions),\n )\n policy_loss = (alpha * log_pi - q_new_actions).mean()\n\n loss_collection['policy'] = policy_loss\n\n \"\"\" Q function loss \"\"\"\n q1_pred = forward_qf(train_params['qf1'], observations, actions)\n q2_pred = forward_qf(train_params['qf2'], observations, actions)\n\n if self.config.cql_max_target_backup:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n max_target_indices = jnp.expand_dims(jnp.argmax(target_q_values, axis=-1), axis=-1)\n target_q_values = jnp.take_along_axis(target_q_values, max_target_indices, axis=-1).squeeze(-1)\n next_log_pi = jnp.take_along_axis(next_log_pi, max_target_indices, axis=-1).squeeze(-1)\n else:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n\n if self.config.backup_entropy:\n target_q_values = target_q_values - alpha * next_log_pi\n\n td_target = jax.lax.stop_gradient(\n rewards + (1. 
- dones) * self.config.discount * target_q_values\n )\n qf1_loss = mse_loss(q1_pred, td_target)\n qf2_loss = mse_loss(q2_pred, td_target)\n\n ### CQL\n if self.config.use_cql and use_cql:\n batch_size = actions.shape[0]\n cql_random_actions = jax.random.uniform(\n rng_generator(), shape=(batch_size, self.config.cql_n_actions, self.action_dim),\n minval=-1.0, maxval=1.0\n )\n\n cql_current_actions, cql_current_log_pis = forward_policy(\n train_params['policy'], observations, repeat=self.config.cql_n_actions,\n )\n cql_next_actions, cql_next_log_pis = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions,\n )\n\n cql_q1_rand = forward_qf(train_params['qf1'], observations, cql_random_actions)\n cql_q2_rand = forward_qf(train_params['qf2'], observations, cql_random_actions)\n cql_q1_current_actions = forward_qf(train_params['qf1'], observations, cql_current_actions)\n cql_q2_current_actions = forward_qf(train_params['qf2'], observations, cql_current_actions)\n cql_q1_next_actions = forward_qf(train_params['qf1'], observations, cql_next_actions)\n cql_q2_next_actions = forward_qf(train_params['qf2'], observations, cql_next_actions)\n\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand, jnp.expand_dims(q1_pred, 1), cql_q1_next_actions, cql_q1_current_actions], axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand, jnp.expand_dims(q2_pred, 1), cql_q2_next_actions, cql_q2_current_actions], axis=1\n )\n cql_std_q1 = jnp.std(cql_cat_q1, axis=1)\n cql_std_q2 = jnp.std(cql_cat_q2, axis=1)\n\n if self.config.cql_importance_sample:\n random_density = np.log(0.5 ** self.action_dim)\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand - random_density,\n cql_q1_next_actions - cql_next_log_pis,\n cql_q1_current_actions - cql_current_log_pis],\n axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand - random_density,\n cql_q2_next_actions - cql_next_log_pis,\n cql_q2_current_actions - cql_current_log_pis],\n axis=1\n )\n\n cql_qf1_ood = (\n jax.scipy.special.logsumexp(cql_cat_q1 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n cql_qf2_ood = (\n jax.scipy.special.logsumexp(cql_cat_q2 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n\n \"\"\"Subtract the log likelihood of data\"\"\"\n cql_qf1_diff = jnp.clip(\n cql_qf1_ood - q1_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n cql_qf2_diff = jnp.clip(\n cql_qf2_ood - q2_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n\n if self.config.cql_lagrange:\n alpha_prime = jnp.clip(\n jnp.exp(self.log_alpha_prime.apply(train_params['log_alpha_prime'])),\n a_min=0.0, a_max=1000000.0\n )\n cql_min_qf1_loss = alpha_prime * self.config.cql_min_q_weight * (\n cql_qf1_diff - self.config.cql_target_action_gap)\n cql_min_qf2_loss = alpha_prime * self.config.cql_min_q_weight * (\n cql_qf2_diff - self.config.cql_target_action_gap)\n\n alpha_prime_loss = (-cql_min_qf1_loss - cql_min_qf2_loss) * 0.5\n\n loss_collection['log_alpha_prime'] = alpha_prime_loss\n\n else:\n cql_min_qf1_loss = cql_qf1_diff * self.config.cql_min_q_weight\n cql_min_qf2_loss = cql_qf2_diff * self.config.cql_min_q_weight\n alpha_prime_loss = 0.0\n alpha_prime = 0.0\n\n qf1_loss = qf1_loss + cql_min_qf1_loss\n qf2_loss = qf2_loss + cql_min_qf2_loss\n\n loss_collection['qf1'] = qf1_loss\n loss_collection['qf2'] = qf2_loss\n\n loss_collection\n return tuple(loss_collection[key] for key in self.model_keys), locals()\n\n train_params = {key: train_states[key].params for key in 
self.model_keys}\n (_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params)\n\n new_train_states = {\n key: train_states[key].apply_gradients(grads=grads[i][key])\n for i, key in enumerate(self.model_keys)\n }\n new_target_qf_params = {}\n new_target_qf_params['qf1'] = update_target_network(\n new_train_states['qf1'].params, target_qf_params['qf1'],\n self.config.soft_target_update_rate\n )\n new_target_qf_params['qf2'] = update_target_network(\n new_train_states['qf2'].params, target_qf_params['qf2'],\n self.config.soft_target_update_rate\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['log_pi', 'policy_loss', 'qf1_loss', 'qf2_loss', 'alpha_loss',\n 'alpha', 'q1_pred', 'q2_pred', 'target_q_values']\n )\n\n if self.config.use_cql:\n metrics.update(collect_jax_metrics(\n aux_values,\n ['cql_std_q1', 'cql_std_q2', 'cql_q1_rand', 'cql_q2_rand'\n 'cql_qf1_diff', 'cql_qf2_diff', 'cql_min_qf1_loss',\n 'cql_min_qf2_loss', 'cql_q1_current_actions', 'cql_q2_current_actions'\n 'cql_q1_next_actions', 'cql_q2_next_actions',\n 'alpha_prime',\n 'alpha_prime_loss'],\n 'cql'\n ))\n\n return new_train_states, new_target_qf_params, metrics\n\n @property\n def model_keys(self):\n return self._model_keys\n\n @property\n def train_states(self):\n return self._train_states\n\n @property\n def train_params(self):\n return {key: self.train_states[key].params for key in self.model_keys}\n\n @property\n def total_steps(self):\n return self._total_steps"
},
{
"identifier": "get_d4rl_dataset",
"path": "jax_cql/JaxCQL/replay_buffer.py",
"snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )"
},
{
"identifier": "subsample_batch",
"path": "jax_cql/JaxCQL/replay_buffer.py",
"snippet": "def subsample_batch(batch, size):\n indices = np.random.randint(batch['observations'].shape[0], size=size)\n return index_batch(batch, indices)"
},
{
"identifier": "batch_to_jax",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)"
},
{
"identifier": "TanhGaussianPolicy",
"path": "jax_cql/JaxCQL/model.py",
"snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.Transformed(\n distrax.MultivariateNormalDiag(mean, jnp.exp(log_std)),\n distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.Transformed(\n distrax.MultivariateNormalDiag(mean, jnp.exp(log_std)),\n distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = jnp.tanh(mean)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')"
},
{
"identifier": "FullyConnectedQFunction",
"path": "jax_cql/JaxCQL/model.py",
"snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params',)"
},
{
"identifier": "SamplerPolicy",
"path": "jax_cql/JaxCQL/model.py",
"snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(params, observations, deterministic, repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys()), )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)"
},
{
"identifier": "StepSampler",
"path": "jax_cql/JaxCQL/sampler.py",
"snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n )\n\n @property\n def env(self):\n return self._env"
},
{
"identifier": "TrajSampler",
"path": "jax_cql/JaxCQL/sampler.py",
"snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, deterministic=False, replay_buffer=None):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env"
},
{
"identifier": "Timer",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - self._start_time\n\n def __call__(self):\n return self._time"
},
{
"identifier": "define_flags_with_default",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs"
},
{
"identifier": "set_random_seed",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)"
},
{
"identifier": "print_flags",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )"
},
{
"identifier": "get_user_flags",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output"
},
{
"identifier": "prefix_metrics",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }"
},
{
"identifier": "WandBLogger",
"path": "jax_cql/JaxCQL/utils.py",
"snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'FamilyJaxCQL'\n config.project = 'sac'\n config.output_dir = '/tmp/FamilyJaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, **kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir"
}
] | import os
import time
import uuid
import numpy as np
import pprint
import jax
import jax.numpy as jnp
import flax
import gym
import d4rl
import absl.app
import absl.flags
from copy import deepcopy
from .conservative_sac import ConservativeSAC
from .replay_buffer import get_d4rl_dataset, subsample_batch
from .jax_utils import batch_to_jax
from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy
from .sampler import StepSampler, TrajSampler
from .utils import (
Timer, define_flags_with_default, set_random_seed, print_flags,
get_user_flags, prefix_metrics, WandBLogger
)
from viskit.logging import logger, setup_logger | 7,476 |
FLAGS_DEF = define_flags_with_default(
env='halfcheetah-medium-v2',
max_traj_length=1000,
seed=42,
save_model=False,
batch_size=256,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
bc_epochs=0,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
cql=ConservativeSAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
setup_logger(
variant=variant,
exp_id=wandb_logger.experiment_id,
seed=FLAGS.seed,
base_log_dir=FLAGS.logging.output_dir,
include_exp_prefix_sub_dir=False
)
set_random_seed(FLAGS.seed)
eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
observation_dim = eval_sampler.env.observation_space.shape[0]
action_dim = eval_sampler.env.action_space.shape[0]
policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset
)
qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init)
if FLAGS.cql.target_entropy >= 0.0:
FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item()
sac = ConservativeSAC(FLAGS.cql, policy, qf)
sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy'])
viskit_metrics = {}
for epoch in range(FLAGS.n_epochs):
metrics = {'epoch': epoch}
with Timer() as train_timer:
for batch_idx in range(FLAGS.n_train_step_per_epoch):
|
FLAGS_DEF = define_flags_with_default(
env='halfcheetah-medium-v2',
max_traj_length=1000,
seed=42,
save_model=False,
batch_size=256,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
bc_epochs=0,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
cql=ConservativeSAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
setup_logger(
variant=variant,
exp_id=wandb_logger.experiment_id,
seed=FLAGS.seed,
base_log_dir=FLAGS.logging.output_dir,
include_exp_prefix_sub_dir=False
)
set_random_seed(FLAGS.seed)
eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
observation_dim = eval_sampler.env.observation_space.shape[0]
action_dim = eval_sampler.env.action_space.shape[0]
policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset
)
qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init)
if FLAGS.cql.target_entropy >= 0.0:
FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item()
sac = ConservativeSAC(FLAGS.cql, policy, qf)
sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy'])
viskit_metrics = {}
for epoch in range(FLAGS.n_epochs):
metrics = {'epoch': epoch}
with Timer() as train_timer:
for batch_idx in range(FLAGS.n_train_step_per_epoch): | batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size)) | 3 | 2023-10-25 11:53:25+00:00 | 12k |
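For orientation, the record above ends with the gold next_line `batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size))`, whose gold_snippet_index 3 points at the `batch_to_jax` context snippet. What follows is only a hedged sketch of how that line plausibly continues the truncated training loop, assembled strictly from the snippets included in this record (`subsample_batch`, `batch_to_jax`, `ConservativeSAC.train`, `prefix_metrics`, `Timer`, `WandBLogger.log`); the `use_cql`/`bc` arguments and the per-epoch logging cadence are assumptions, not the repository's verified continuation.

# Hedged sketch of the inner loop inside main(); everything after the gold
# next_line is an assumption based on this record's own snippets.
for epoch in range(FLAGS.n_epochs):
    metrics = {'epoch': epoch}

    with Timer() as train_timer:
        for batch_idx in range(FLAGS.n_train_step_per_epoch):
            # gold next_line of this record:
            batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size))
            # assumed follow-up: one ConservativeSAC update per batch, with
            # behavior cloning during the first FLAGS.bc_epochs epochs.
            metrics.update(prefix_metrics(
                sac.train(batch, use_cql=True, bc=epoch < FLAGS.bc_epochs), 'sac'
            ))

    metrics['train_time'] = train_timer()  # Timer.__call__ returns elapsed seconds
    wandb_logger.log(metrics)              # assumed per-epoch logging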
Eanya-Tonic/MihiroToolbox | MihiroToolBox.py | [
{
"identifier": "VideoInterface",
"path": "VideoInterface.py",
"snippet": "class VideoInterface(QWidget, Ui_Video):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n\n # 编码选项\n self.EncoderType.addItem('x264')\n self.EncoderType.addItem('x265')\n\n self.DepthChoice.addItem('压制音频')\n self.DepthChoice.addItem('无音频')\n\n # 编码参数\n self.KbpsLabel.setVisible(0)\n self.ParmsNum.setValue(24)\n self.ButtonCRF.clicked.connect(partial(self.EncodeParms, 0))\n self.ButtonVBR.clicked.connect(partial(self.EncodeParms, 1))\n self.Button2pass.clicked.connect(partial(self.EncodeParms, 1))\n\n # 文件选项\n self.InputButton.clicked.connect(\n partial(self.FileSelect, self.InputLine))\n self.Outputbutton.clicked.connect(\n partial(self.FileSelect, self.OutputLine))\n self.Outputbutton_2.clicked.connect(\n partial(self.FileSelect, self.TextLine))\n \n # 文件自动填充\n self.InputLine.textChanged.connect(\n partial(self.AutoFill, self.InputLine, self.OutputLine))\n \n\n # 分辨率选项\n self.WidthNum.setDisabled(1)\n self.HeightNum.setDisabled(1)\n self.IfEnableSwitch.checkedChanged.connect(self.ResolutionChange)\n\n # 开始压制\n self.StartButton.clicked.connect(self.ProcessFunc)\n self.StartButton.setWindowIconText(\"\")\n self.StartButton.windowIconTextChanged.connect(self.ProcessComplte)\n self.ProgressBar.setVisible(0)\n\n # 硬件加速\n self.HardAccler.addItem(\"软解\")\n self.HardAccler.addItem(\"Nvidia\")\n self.HardAccler.addItem(\"AMD\")\n self.HardAccler.addItem(\"Intel\")\n # CRF不支持硬件编码\n self.HardAccler.setDisabled(1)\n\n # 文件选择函数\n '''\n 输入: 选择文件的目标LineEdit\n 输出: 无输出\n 描述: 选择文件函数, 与界面上的浏览按钮绑定, 用于把资源管理器读取的地址传回输入框\n '''\n def FileSelect(self, TargetLine):\n dir = QFileDialog()\n dir.setDirectory(os.getcwd())\n if dir.exec_(): # 判断是否选择了文件\n FilePath = dir.selectedFiles()\n TargetLine.setText(FilePath[0])\n \n # 自动填充函数\n '''\n 输入: 选择文件的源LineEdit, 自动同步的目标LineEdit\n 输出: 无输出\n 描述: 根据输入框内容自动填充输出框\n '''\n def AutoFill(self, SourceLine, TargetLine):\n FilePath = SourceLine.text()\n if FilePath == \"\":\n return\n FileExt = os.path.splitext(FilePath)[1]\n FilePath = os.path.splitext(FilePath)[0]\n NewFilePath = FilePath + '_output.mp4'\n TargetLine.setText(NewFilePath)\n\n # 自定义分辨率控制\n def ResolutionChange(self):\n if (self.IfEnableSwitch.checked):\n self.WidthNum.setDisabled(0)\n self.HeightNum.setDisabled(0)\n else:\n self.WidthNum.setDisabled(1)\n self.HeightNum.setDisabled(1)\n\n # 编码参数控制\n '''\n 输入: 编码模式选择, 0是CRF模式, 1是VBR模式\n 输出: 无输出\n '''\n def EncodeParms(self, choice):\n # 0是CRF模式\n if(choice == 0):\n self.KbpsLabel.setVisible(0)\n self.ParmsSetTitle.setText(\"CRF\")\n # CRF默认24,小数两位\n self.ParmsNum.setValue(24)\n self.ParmsNum.setDecimals(2)\n # CRF不支持硬件编码\n self.HardAccler.setCurrentIndex(0)\n self.HardAccler.setDisabled(1)\n # 1是VBR模式\n elif(choice == 1):\n self.KbpsLabel.setVisible(1)\n self.ParmsSetTitle.setText(\"目标比特率\")\n # VBR默认5000,没有小数\n self.ParmsNum.setValue(5000)\n self.ParmsNum.setDecimals(0)\n # VBR支持硬件编码\n self.HardAccler.setDisabled(0)\n\n # 压制控制\n def ProcessFunc(self):\n # 地址缺失\n if(self.InputLine.text() == '' or self.OutputLine.text() == ''):\n InfoBar.error(\n title='未定义视频地址',\n content=\"请确认你是否已经设定了正确的输入输出视频地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.InputLine.text() + \\\n \"\\\" -y \"\n ProcessCmd0 = \"\"\n \n # 设置音频\n if(self.DepthChoice.text() == \"压制音频\"):\n ProcessCmd += \"\"\n else:\n ProcessCmd += \"-an \"\n\n # 硬件加速编码器\n if(self.EncoderType.text() == 'x264'):\n 
if(self.HardAccler.text() == \"软解\"):\n ProcessCmd += \"-vcodec libx264 \"\n elif(self.HardAccler.text() == \"Nvidia\"):\n ProcessCmd += \"-vcodec h264_nvenc \"\n elif(self.HardAccler.text() == \"AMD\"):\n ProcessCmd += \"-vcodec h264_amf \"\n elif(self.HardAccler.text() == \"Intel\"):\n ProcessCmd += \"-vcodec h264_qsv \"\n else:\n if(self.HardAccler.text() == \"软解\"):\n ProcessCmd += \"-vcodec libx265 \"\n elif(self.HardAccler.text() == \"Nvidia\"):\n ProcessCmd += \"-vcodec hevc_nvenc \"\n elif(self.HardAccler.text() == \"AMD\"):\n ProcessCmd += \"-vcodec hevc_amf \"\n elif(self.HardAccler.text() == \"Intel\"):\n ProcessCmd += \"-vcodec hevc_qsv \"\n\n # 自定义分辨率\n if(self.IfEnableSwitch.checked):\n ProcessCmd += \"-s \" + \\\n str(self.WidthNum.value()) + \"x\" + \\\n str(self.HeightNum.value()) + \" \"\n\n # 按帧数截取视频\n if(self.TotalFrameNum.value() != 0):\n ProcessCmd += \"-vf \\\"select=between(n\\\\,\" + str(\n self.StartFrameNum.value()) + \"\\\\,\"+str(self.TotalFrameNum.value())+\")\\\" \"\n\n # 切换CRF和VBR参数\n if(self.ButtonCRF.isChecked()):\n ProcessCmd += \"-crf \" + str(self.ParmsNum.value()) + \" \"\n elif(self.ButtonVBR.isChecked()):\n ProcessCmd += \"-b:v \" + str(self.ParmsNum.value()) + \"K \"\n else:\n ProcessCmd0 = ProcessCmd\n ProcessCmd0 += \"-b:v \" + \\\n str(self.ParmsNum.value()) + \"K -pass 1 -an -f rawvideo -y NUL\"\n\n ProcessCmd += \"-b:v \" + str(self.ParmsNum.value()) + \"K -pass 2 \"\n\n thread_01 = Thread(target=self.CmdThread,\n args=(ProcessCmd0, ProcessCmd))\n thread_01.start()\n\n # 多线程编码函数\n '''\n 输入: 2Pass使用的第一次Pass处理指令(为空则不适用), 处理指令\n 输出: 无输出\n '''\n def CmdThread(self, ProcessCmd0, ProcessCmd):\n\n self.ProgressBar.setVisible(1)\n self.StartButton.setText(\"正在压制...\")\n self.StartButton.setWindowIconText(\" \")\n self.StartButton.setDisabled(1)\n\n if(self.TextLine.text() != ''):\n ProcessCmd += \"-vf \\\"subtitles=\\'\" + \\\n self.TextLine.text().replace(\":\", \"\\:\") + \"\\'\\\"\"\n \n # 按帧数截取视频\n if(self.TotalFrameNum.value != 0):\n ProcessCmd += \",\\\"select=between(n\\\\,\" + str(\n self.StartFrameNum.value()) + \"\\\\,\"+str(self.TotalFrameNum.value())+\")\\\" \"\n\n ProcessCmd += \" \\\"\" + self.OutputLine.text() + \"\\\"\"\n \n if(ProcessCmd0 != \"\"):\n os.system(ProcessCmd0)\n os.system(ProcessCmd)\n\n self.ProgressBar.setVisible(0)\n self.StartButton.setText(\"开始压制\")\n self.StartButton.setDisabled(0)\n self.StartButton.setWindowIconText(\"\")\n \n if(self.AutoPowerOffButton.isChecked()):\n os.system('shutdown -s -t 2')\n\n # 压制完成提示\n def ProcessComplte(self):\n if(self.StartButton.text() == \"开始压制\"):\n InfoBar.success(\n title='任务执行完成',\n content=\"请确认是否压制成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )"
},
{
"identifier": "AudioInterface",
"path": "AudioInterface.py",
"snippet": "class AudioInterface(QWidget, Ui_Audio):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n\n # 音频编码选项\n self.EncoderChioce.addItem('ACC')\n self.EncoderChioce.addItem('TTA')\n self.EncoderChioce.addItem('WAV')\n self.EncoderChioce.addItem('ALAC')\n self.EncoderChioce.addItem('FLAC')\n self.EncoderChioce.addItem('AC3')\n self.EncoderChioce.addItem('MP3')\n self.EncoderChioce.currentTextChanged.connect(self.EncoderChange)\n \n # 码率\n self.BitrateNum.setValue(128)\n\n # 文件选项\n self.InputButton.clicked.connect(\n partial(self.FileSelect, self.InputLine))\n self.Outputbutton.clicked.connect(\n partial(self.FileSelect, self.OutputLine))\n \n # 自动填充\n self.InputLine.textChanged.connect(\n partial(self.AutoFill, self.InputLine, self.OutputLine))\n \n # 压制选项\n self.ProcessButton.clicked.connect(self.ProcessFunc)\n self.ProcessButton.setWindowIconText(\"\")\n self.ProcessButton.windowIconTextChanged.connect(self.ProcessComplte)\n self.ProgressBar.setVisible(0)\n \n\n # 文件选择函数\n '''\n 输入: 选择文件的目标LineEdit\n 输出: 无输出\n 描述: 选择文件函数, 与界面上的浏览按钮绑定, 用于把资源管理器读取的地址传回输入框\n '''\n def FileSelect(self, TargetLine):\n dir = QFileDialog()\n dir.setDirectory(os.getcwd())\n if dir.exec_(): # 判断是否选择了文件\n FilePath = dir.selectedFiles()\n TargetLine.setText(FilePath[0])\n \n # 自动填充函数\n '''\n 输入: 选择文件的源LineEdit, 自动同步的目标LineEdit\n 输出: 无输出\n 描述: 根据输入框内容自动填充输出框\n '''\n def AutoFill(self, SourceLine, TargetLine):\n FilePath = SourceLine.text()\n if FilePath == \"\":\n return\n FileExt = os.path.splitext(FilePath)[1]\n FilePath = os.path.splitext(FilePath)[0]\n NewFilePath = FilePath + '_output'\n\n # 根据编码器选择后缀\n if(self.EncoderChioce.text() == 'ACC'):\n NewFilePath = NewFilePath + '.m4a'\n elif(self.EncoderChioce.text() == 'TTA'):\n NewFilePath = NewFilePath + '.tta'\n elif(self.EncoderChioce.text() == 'WAV'):\n NewFilePath = NewFilePath + '.wav'\n elif(self.EncoderChioce.text() == 'ALAC'):\n NewFilePath = NewFilePath + '.m4a'\n elif(self.EncoderChioce.text() == 'FLAC'):\n NewFilePath = NewFilePath + '.flac'\n elif(self.EncoderChioce.text() == 'AC3'):\n NewFilePath = NewFilePath + '.ac3'\n elif(self.EncoderChioce.text() == 'MP3'):\n NewFilePath = NewFilePath + '.mp3'\n\n TargetLine.setText(NewFilePath)\n \n # 去除文件后缀名用于处理\n '''\n 输入: 带有后缀名的文件地址string\n 输出: 无后缀名的文件地址string\n '''\n def RemoveExt(self, FilePath):\n FilePath = os.path.splitext(FilePath)[0]\n return FilePath\n \n # 编码器选择函数\n def EncoderChange(self):\n if(self.EncoderChioce.text() == 'ACC'):\n self.BitrateNum.setDisabled(0)\n if(self.OutputLine.text()!=\"\"):\n self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.m4a')\n elif(self.EncoderChioce.text() == 'TTA'):\n self.BitrateNum.setDisabled(1)\n if(self.OutputLine.text()!=\"\"):\n self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.tta')\n elif(self.EncoderChioce.text() == 'WAV'):\n self.BitrateNum.setDisabled(1)\n if(self.OutputLine.text()!=\"\"):\n self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.wav')\n elif(self.EncoderChioce.text() == 'ALAC'):\n self.BitrateNum.setDisabled(1)\n if(self.OutputLine.text()!=\"\"):\n self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.m4a')\n elif(self.EncoderChioce.text() == 'FLAC'):\n self.BitrateNum.setDisabled(1)\n if(self.OutputLine.text()!=\"\"):\n self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.flac')\n elif(self.EncoderChioce.text() == 'AC3'):\n self.BitrateNum.setDisabled(1)\n if(self.OutputLine.text()!=\"\"):\n 
self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.ac3')\n elif(self.EncoderChioce.text() == 'MP3'):\n self.BitrateNum.setDisabled(0)\n if(self.OutputLine.text()!=\"\"):\n self.OutputLine.setText(self.RemoveExt(self.OutputLine.text()) + '.mp3')\n\n # 压制函数\n def ProcessFunc(self):\n # 地址缺失\n if(self.InputLine.text() == '' or self.OutputLine.text() == ''):\n InfoBar.error(\n title='未定义音频地址',\n content=\"请确认你是否已经设定了正确的输入输出音频地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n \n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.InputLine.text() + \\\n \"\\\" -y -vn \"\n \n # 设置编码器\n if(self.EncoderChioce.text() == 'ACC'):\n ProcessCmd += \"-c:a aac -b:a \" + str(self.BitrateNum.value()) + \"k \"\n elif(self.EncoderChioce.text() == 'TTA'):\n ProcessCmd += \"-c:a tta \"\n elif(self.EncoderChioce.text() == 'WAV'):\n ProcessCmd += \"-c:a pcm_s16le \"\n elif(self.EncoderChioce.text() == 'ALAC'):\n ProcessCmd += \"-c:a alac \"\n elif(self.EncoderChioce.text() == 'FLAC'):\n ProcessCmd += \"-c:a flac \"\n elif(self.EncoderChioce.text() == 'AC3'):\n ProcessCmd += \"-c:a ac3 \"\n elif(self.EncoderChioce.text() == 'MP3'):\n ProcessCmd += \"-c:a libmp3lame -b:a \" + str(self.BitrateNum.value()) + \"k \"\n \n print(ProcessCmd)\n \n thread_01 = Thread(target=self.CmdThread,\n args=(ProcessCmd,))\n thread_01.start()\n \n # 多线程编码函数\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def CmdThread(self, ProcessCmd):\n\n self.ProgressBar.setVisible(1)\n self.ProcessButton.setText(\"正在压制...\")\n self.ProcessButton.setWindowIconText(\" \")\n self.ProcessButton.setDisabled(1)\n\n ProcessCmd += \" \\\"\" + self.OutputLine.text() + \"\\\"\"\n os.system(ProcessCmd)\n\n self.ProgressBar.setVisible(0)\n self.ProcessButton.setText(\"压制\")\n self.ProcessButton.setDisabled(0)\n self.ProcessButton.setWindowIconText(\"\")\n \n # 压制完成提示\n def ProcessComplte(self):\n if(self.ProcessButton.text() == \"压制\"):\n InfoBar.success(\n title='任务执行完成',\n content=\"请确认是否压制成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )"
},
{
"identifier": "CommonInterface",
"path": "CommonInterface.py",
"snippet": "class CommonInterface(QWidget, Ui_Common):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n\n # 音频参数\n self.BitrateNum.setValue(128)\n self.FpsNum.setValue(24)\n self.CrfNum.setValue(24)\n\n # 硬件加速\n self.HardAccler.addItem(\"软解\")\n self.HardAccler.addItem(\"Nvidia\")\n self.HardAccler.addItem(\"AMD\")\n self.HardAccler.addItem(\"Intel\")\n\n # 隐藏ProgressBar\n self.ProgressBar1.setVisible(0)\n self.ProgressBar2.setVisible(0)\n self.ProgressBar3.setVisible(0)\n\n # 截取视频\n self.StartTimeLine.setText('00:00:00')\n self.EndTimeLine.setText('00:00:20')\n\n # 旋转视频\n self.OptionChoice.addItem('顺时针90度')\n self.OptionChoice.addItem('逆时针90度')\n self.OptionChoice.addItem('180度')\n self.OptionChoice.addItem('水平翻转')\n self.OptionChoice.addItem('垂直翻转')\n\n # 操作开始按钮\n self.ProcessStartButton.clicked.connect(self.ProcessFunc)\n self.ProcessStartButton.setWindowIconText(\"\")\n self.ProcessStartButton.windowIconTextChanged.connect(\n self.ProcessComplte)\n\n self.ClipVideoButton.clicked.connect(self.ClipFunc)\n self.ClipVideoButton.setWindowIconText(\"\")\n self.ClipVideoButton.windowIconTextChanged.connect(self.ClipComplte)\n\n self.TransposeButton.clicked.connect(self.TransposeFunc)\n self.TransposeButton.setWindowIconText(\"\")\n self.TransposeButton.windowIconTextChanged.connect(\n self.TransposeComplte)\n\n # 文件选择\n self.InputButton.clicked.connect(\n partial(self.FileSelect, self.InputLine))\n self.AudioInputButton.clicked.connect(\n partial(self.FileSelect, self.AudioInputLine))\n self.VideoOutputButton_2.clicked.connect(\n partial(self.FileSelect, self.VideoOutputLine_2))\n\n self.VideoInputButton.clicked.connect(\n partial(self.FileSelect, self.VideoInputLine))\n self.VideoOutputButton.clicked.connect(\n partial(self.FileSelect, self.VideoOutputLine))\n \n # 文件自动填充\n self.VideoInputLine.textChanged.connect(\n partial(self.AutoFill, self.VideoInputLine, self.VideoOutputLine, 2))\n self.AudioInputLine.textChanged.connect(\n partial(self.AutoFill, self.AudioInputLine, self.VideoOutputLine_2, 1))\n\n # 文件选择函数\n '''\n 输入: 选择文件的目标LineEdit\n 输出: 无输出\n 描述: 选择文件函数, 与界面上的浏览按钮绑定, 用于把资源管理器读取的地址传回输入框\n '''\n def FileSelect(self, TargetLine):\n dir = QFileDialog()\n dir.setDirectory(os.getcwd())\n if dir.exec_(): # 判断是否选择了文件\n FilePath = dir.selectedFiles()\n TargetLine.setText(FilePath[0])\n \n # 自动填充函数\n '''\n 输入: 选择文件的源LineEdit, 自动同步的目标LineEdit, 自动填充后缀名类型 1是mp4 2是保留原本的后缀名\n 输出: 无输出\n 描述: 根据输入框内容自动填充输出框\n '''\n def AutoFill(self, SourceLine, TargetLine, Type):\n FilePath = SourceLine.text()\n if FilePath == \"\":\n return\n FileExt = os.path.splitext(FilePath)[1]\n FilePath = os.path.splitext(FilePath)[0]\n if(Type == 1):\n NewFilePath = FilePath + '_output.mp4'\n elif(Type == 2):\n NewFilePath = FilePath + '_output' + FileExt\n TargetLine.setText(NewFilePath)\n\n # 一图流控制\n def ProcessFunc(self):\n # 地址缺失\n if(self.InputLine.text() == '' or self.VideoOutputLine_2.text() == '' or self.AudioInputLine.text() == ''):\n InfoBar.error(\n title='未定义地址',\n content=\"请确认你是否已经设定了正确的输入输出地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -loop 1 \" \n \n # 硬件加速\n if(self.HardAccler.currentIndex() == 0):\n pass\n elif(self.HardAccler.currentIndex() == 1):\n ProcessCmd += \"-hwaccel nvdec \"\n elif(self.HardAccler.currentIndex() == 2):\n ProcessCmd += \"-hwaccel amf \"\n elif(self.HardAccler.currentIndex() == 3):\n ProcessCmd += \"-hwaccel qsv 
\"\n \n ProcessCmd += \"-i \\\"\" + self.InputLine.text() + \"\\\" \" + \"-i \\\"\" + self.AudioInputLine.text() + \"\\\" -y -shortest \"\n\n # 音频参数\n ProcessCmd += \"-c:a aac -b:a \" + str(self.BitrateNum.value()) + \"k \"\n\n # 视频参数\n ProcessCmd += \"-r \" + str(self.FpsNum.value()) + \\\n \" -crf \" + str(self.CrfNum.value()) + \" \"\n\n thread_01 = Thread(target=self.CmdThread01,\n args=(ProcessCmd,))\n thread_01.start()\n \n # 多线程编码函数01\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def CmdThread01(self, ProcessCmd):\n\n self.ProgressBar1.setVisible(1)\n self.ProcessStartButton.setText(\"正在压制...\")\n self.ProcessStartButton.setWindowIconText(\" \")\n self.ProcessStartButton.setDisabled(1)\n\n ProcessCmd += \" \\\"\" + self.VideoOutputLine_2.text() + \"\\\"\"\n os.system(ProcessCmd)\n\n self.ProgressBar1.setVisible(0)\n self.ProcessStartButton.setText(\"开始压制\")\n self.ProcessStartButton.setDisabled(0)\n self.ProcessStartButton.setWindowIconText(\"\")\n \n # 压制完成提示\n def ProcessComplte(self):\n if(self.ProcessStartButton.text() == \"开始压制\"):\n InfoBar.success(\n title='任务执行完成',\n content=\"请确认是否压制成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n \n # 视频截取控制\n def ClipFunc(self):\n # 地址缺失\n if(self.VideoInputLine.text() == '' or self.VideoOutputLine.text() == ''):\n InfoBar.error(\n title='未定义地址',\n content=\"请确认你是否已经设定了正确的输入输出地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n \n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.VideoInputLine.text() + \"\\\" -y -ss \" + self.StartTimeLine.text() + \" -to \" + self.EndTimeLine.text() + \" -c copy \\\"\" + self.VideoOutputLine.text() + \"\\\"\"\n \n thread_02 = Thread(target=self.CmdThread02,\n args=(ProcessCmd,))\n thread_02.start()\n \n # 多线程编码函数02\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def CmdThread02(self,ProcessCmd):\n self.ProgressBar2.setVisible(1)\n self.ClipVideoButton.setText(\"正在截取...\")\n self.ClipVideoButton.setWindowIconText(\" \")\n self.ClipVideoButton.setDisabled(1)\n self.TransposeButton.setDisabled(1)\n \n os.system(ProcessCmd)\n \n self.ProgressBar2.setVisible(0)\n self.ClipVideoButton.setText(\"开始截取\")\n self.ClipVideoButton.setDisabled(0)\n self.TransposeButton.setDisabled(0)\n self.ClipVideoButton.setWindowIconText(\"\")\n \n # 截取完成提示\n def ClipComplte(self):\n if(self.ClipVideoButton.text() == \"开始截取\"):\n InfoBar.success(\n title='任务执行完成',\n content=\"请确认是否截取成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n \n # 视频旋转控制\n def TransposeFunc(self):\n # 地址缺失\n if(self.VideoInputLine.text() == '' or self.VideoOutputLine.text() == ''):\n InfoBar.error(\n title='未定义地址',\n content=\"请确认你是否已经设定了正确的输入输出地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n \n # 旋转参数\n if(self.OptionChoice.currentIndex() == 0):\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.VideoInputLine.text() + \"\\\" -y -vf \\\"transpose=1\\\" \\\"\" + self.VideoOutputLine.text() + \"\\\"\"\n elif(self.OptionChoice.currentIndex() == 1):\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.VideoInputLine.text() + \"\\\" -y -vf \\\"transpose=2\\\" \\\"\" + self.VideoOutputLine.text() + \"\\\"\"\n elif(self.OptionChoice.currentIndex() == 2):\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + 
self.VideoInputLine.text() + \"\\\" -y -vf \\\"transpose=2,transpose=2\\\" \\\"\" + self.VideoOutputLine.text() + \"\\\"\"\n elif(self.OptionChoice.currentIndex() == 3):\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.VideoInputLine.text() + \"\\\" -y -vf \\\"hflip\\\" \\\"\" + self.VideoOutputLine.text() + \"\\\"\"\n elif(self.OptionChoice.currentIndex() == 4):\n ProcessCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.VideoInputLine.text() + \"\\\" -y -vf \\\"vflip\\\" \\\"\" + self.VideoOutputLine.text() + \"\\\"\"\n \n thread_03 = Thread(target=self.CmdThread03,\n args=(ProcessCmd, ))\n thread_03.start()\n \n # 多线程编码函数03\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def CmdThread03(self, ProcessCmd):\n self.ProgressBar3.setVisible(1)\n self.TransposeButton.setText(\"正在旋转...\")\n self.TransposeButton.setWindowIconText(\" \")\n self.TransposeButton.setDisabled(1)\n self.ClipVideoButton.setDisabled(1)\n \n os.system(ProcessCmd)\n \n self.ProgressBar3.setVisible(0)\n self.TransposeButton.setText(\"开始旋转\")\n self.TransposeButton.setDisabled(0)\n self.ClipVideoButton.setDisabled(0)\n self.TransposeButton.setWindowIconText(\"\")\n \n # 旋转完成提示\n def TransposeComplte(self):\n if(self.TransposeButton.text() == \"开始旋转\"):\n InfoBar.success(\n title='任务执行完成',\n content=\"请确认是否旋转成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )"
},
{
"identifier": "PackageInterface",
"path": "PackageInterface.py",
"snippet": "class PackageInterface(QWidget, Ui_Package):\n \n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n \n # 隐藏ProgressBar\n self.ProgressBar1.setVisible(0)\n self.ProgressBar2.setVisible(0)\n self.ProgressBar3.setVisible(0)\n \n # 文件选择\n self.InputButton.clicked.connect(partial(self.FileSelect, self.InputLine))\n self.OutputButton.clicked.connect(partial(self.FileSelect, self.OutputLine))\n self.AudioButton.clicked.connect(partial(self.FileSelect, self.AudioLine))\n self.InputButton_2.clicked.connect(partial(self.FileSelect, self.InputLine_2))\n self.AudioButton_2.clicked.connect(partial(self.FileSelect, self.AudioLine_2))\n self.OutputButton_2.clicked.connect(partial(self.FileSelect, self.OutputLine_2))\n self.TextButton.clicked.connect(partial(self.FileSelect, self.TextLine))\n self.OutputButton_3.clicked.connect(partial(self.FileSelect, self.OutputLine_3))\n \n # 文件自动填充\n self.InputLine.textChanged.connect(partial(self.AutoFill, self.InputLine, self.OutputLine, 1))\n self.InputLine_2.textChanged.connect(partial(self.AutoFill, self.InputLine_2, self.OutputLine_2, 2))\n \n # 粘贴地址\n self.PasteButton.clicked.connect(partial(self.Paste, self.AddressLine))\n \n # 操作开始按钮\n self.MP4Start.clicked.connect(self.MP4Func)\n self.MP4Start.setWindowIconText(\"\")\n self.MP4Start.windowIconTextChanged.connect(\n self.MP4Complte)\n \n self.MkvStart.clicked.connect(self.MkvFunc)\n self.MkvStart.setWindowIconText(\"\")\n self.MkvStart.windowIconTextChanged.connect(\n self.MkvComplte)\n\n self.DownloadButton.clicked.connect(self.DownloadFunc)\n self.DownloadButton.setWindowIconText(\"\")\n self.DownloadButton.windowIconTextChanged.connect(\n self.DownloadComplte)\n \n \n # 文件选择函数\n '''\n 输入: 选择文件的目标LineEdit\n 输出: 无输出\n '''\n def FileSelect(self, TargetLine):\n dir = QFileDialog()\n dir.setDirectory(os.getcwd())\n if dir.exec_(): # 判断是否选择了文件\n FilePath = dir.selectedFiles()\n TargetLine.setText(FilePath[0])\n \n # 自动填充函数\n '''\n 输入: 选择文件的源LineEdit, 自动同步的目标LineEdit, 自动填充后缀名类型 1是mp4 2是mkv\n 输出: 无输出\n 描述: 选择文件函数, 与界面上的浏览按钮绑定, 用于把资源管理器读取的地址传回输入框\n '''\n def AutoFill(self, SourceLine, TargetLine, Type):\n FilePath = SourceLine.text()\n if FilePath == \"\":\n return\n FileExt = os.path.splitext(FilePath)[1]\n FilePath = os.path.splitext(FilePath)[0]\n if(Type == 1):\n NewFilePath = FilePath + '_output.mp4'\n elif(Type == 2):\n NewFilePath = FilePath + '_output.mkv'\n TargetLine.setText(NewFilePath)\n\n # 粘贴函数\n '''\n 输入: 黏贴的目标地址\n 输出: 无输出\n 描述: 根据输入框内容自动填充输出框\n '''\n def Paste(self, TargetLine):\n TargetLine.setText(QApplication.clipboard().text())\n \n # MP4封装函数\n def MP4Func(self):\n # 地址缺失\n if(self.InputLine.text() == '' or self.OutputLine.text() == ''):\n InfoBar.error(\n title='未定义地址',\n content=\"请确认你是否已经设定了正确的输入输出地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n\n ProcseeCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.InputLine.text() + \"\\\" \"\n # 封装音频\n if(self.AudioLine.text() != ''):\n ProcseeCmd += \"-i \\\"\" + self.AudioLine.text() + \"\\\" \"\n ProcseeCmd += \"-c:v copy -c:a copy -strict experimental -y \\\"\" + self.OutputLine.text() + \"\\\"\"\n \n thread_01 = Thread(target=self.MP4Thread, args=(ProcseeCmd,))\n thread_01.start()\n \n # MP4封装线程\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def MP4Thread(self, ProcseeCmd):\n self.MP4Start.setWindowIconText(\" \")\n self.ProgressBar1.setVisible(1)\n self.MP4Start.setEnabled(0)\n \n os.system(ProcseeCmd)\n \n 
self.MP4Start.setWindowIconText(\"\")\n self.ProgressBar1.setVisible(0)\n self.MP4Start.setEnabled(1)\n \n # MP4封装完成\n def MP4Complte(self):\n if(self.MP4Start.windowIconText() == \"\"):\n InfoBar.success(\n title='封装完成',\n content=\"请确认是否封装成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n \n # MKV封装函数\n def MkvFunc(self):\n # 地址缺失\n if(self.InputLine_2.text() == '' or self.OutputLine_2.text() == ''):\n InfoBar.error(\n title='未定义地址',\n content=\"请确认你是否已经设定了正确的输入输出地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n\n ProcseeCmd = \"tools\\\\ffmpeg -hide_banner -i \\\"\" + self.InputLine_2.text() + \"\\\" \"\n # 封装音频\n if(self.AudioLine_2.text() != ''):\n ProcseeCmd += \"-i \\\"\" + self.AudioLine_2.text() + \"\\\" \"\n # 封装字幕\n if(self.TextLine.text() != ''):\n ProcseeCmd += \"-i \\\"\" + self.TextLine.text() + \"\\\" \"\n ProcseeCmd += \"-c:v copy -c:a copy -strict experimental -y \\\"\" + self.OutputLine_2.text() + \"\\\"\"\n \n thread_02 = Thread(target=self.MkvThread, args=(ProcseeCmd,))\n thread_02.start()\n \n # MKV封装线程\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def MkvThread(self, ProcseeCmd):\n self.MkvStart.setWindowIconText(\" \")\n self.ProgressBar2.setVisible(1)\n self.MkvStart.setEnabled(0)\n \n os.system(ProcseeCmd)\n \n self.MkvStart.setWindowIconText(\"\")\n self.ProgressBar2.setVisible(0)\n self.MkvStart.setEnabled(1)\n \n # MKV封装完成\n def MkvComplte(self):\n if(self.MkvStart.windowIconText() == \"\"):\n InfoBar.success(\n title='封装完成',\n content=\"请确认是否封装成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n \n # 下载函数\n def DownloadFunc(self):\n # 地址缺失\n if(self.AddressLine.text() == '' or self.OutputLine_3.text() == ''):\n InfoBar.error(\n title='未定义地址',\n content=\"请确认你是否已经设定了正确的下载或输出地址\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )\n return\n \n ProcseeCmd = \"tools\\\\ffmpeg -i \\\"\" + self.AddressLine.text() + \"\\\" -y \\\"\" + self.OutputLine_3.text() + \"\\\"\"\n \n thread_03 = Thread(target=self.DownloadThread, args=(ProcseeCmd,))\n thread_03.start()\n \n # 下载线程\n '''\n 输入: 处理指令\n 输出: 无输出\n '''\n def DownloadThread(self, ProcseeCmd):\n self.DownloadButton.setWindowIconText(\" \")\n self.ProgressBar3.setVisible(1)\n self.DownloadButton.setEnabled(0)\n \n os.system(ProcseeCmd)\n \n self.DownloadButton.setWindowIconText(\"\")\n self.ProgressBar3.setVisible(0)\n self.DownloadButton.setEnabled(1)\n \n # 下载完成\n def DownloadComplte(self):\n if(self.DownloadButton.windowIconText() == \"\"):\n InfoBar.success(\n title='下载完成',\n content=\"请确认是否下载成功\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=6000,\n parent=self\n )"
},
{
"identifier": "SettingInterface",
"path": "SettingInterface.py",
"snippet": "class SettingInterface(QWidget, Ui_Form):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n \n self.HyperlinkLabel.setUrl('https://github.com/Eanya-Tonic/MihiroToolbox')\n \n self.ImageLabel.setPixmap(QPixmap('img/logo.png').scaledToHeight(100))\n\n # 选择主题\n \n conf = configparser.ConfigParser()\n conf.read('config.ini')\n theme = conf.get('DEFAULT', 'theme')\n \n self.ThemeBox.addItem('跟随系统')\n self.ThemeBox.addItem('浅色')\n self.ThemeBox.addItem('深色')\n \n self.ThemeBox.setCurrentIndex(int(theme))\n self.ThemeBox.currentIndexChanged.connect(self.ThemeBoxChanged)\n \n # 关闭开屏画面\n splash = conf.get('DEFAULT', 'splash')\n self.LaunchCheck.setChecked(bool(int(splash)))\n self.LaunchCheck.clicked.connect(self.LaunchCheckClicked)\n \n # 开关ScrollArea\n ScrollUI = conf.get('DEFAULT', 'ScrollUI')\n \n self.ThemeBox_2.addItem('禁用')\n self.ThemeBox_2.addItem('启用')\n \n self.ThemeBox_2.setCurrentIndex(int(ScrollUI))\n self.ThemeBox_2.currentIndexChanged.connect(self.ScrollChanged)\n \n \n # 选择主题\n def ThemeBoxChanged(self):\n conf = configparser.ConfigParser()\n conf.read('config.ini')\n conf.set('DEFAULT', 'theme', str(self.ThemeBox.currentIndex()))\n conf.write(open('config.ini', 'w'))\n \n InfoBar.info(\n title='提示',\n content=\"主题修改重启应用后生效\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=5000,\n parent=self\n )\n \n # 开关ScrollArea\n def ScrollChanged(self):\n conf = configparser.ConfigParser()\n conf.read('config.ini')\n conf.set('DEFAULT', 'ScrollUI', str(self.ThemeBox_2.currentIndex()))\n conf.write(open('config.ini', 'w'))\n \n InfoBar.info(\n title='提示',\n content=\"重启应用后生效\",\n orient=Qt.Horizontal,\n isClosable=True,\n position=InfoBarPosition.BOTTOM_RIGHT,\n duration=5000,\n parent=self\n )\n \n # 关闭开屏画面\n def LaunchCheckClicked(self):\n conf = configparser.ConfigParser()\n conf.read('config.ini')\n conf.set('DEFAULT', 'splash', str(int(self.LaunchCheck.isChecked())))\n conf.write(open('config.ini', 'w'))"
}
] | import sys
import configparser
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QSplashScreen, QDesktopWidget
from qfluentwidgets import SplitFluentWindow, FluentIcon, NavigationItemPosition, setTheme, Theme
from VideoInterface import VideoInterface
from AudioInterface import AudioInterface
from CommonInterface import CommonInterface
from PackageInterface import PackageInterface
from SettingInterface import SettingInterface | 10,515 | # coding:utf-8
class MihiroToolBox(SplitFluentWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('MihiroToolBox')
self.setWindowIcon(QIcon('img/logo.png'))
# 设置默认大小
self.resize(800,800)
# 调整窗口在屏幕中央显示
center_pointer = QDesktopWidget().availableGeometry().center()
x = center_pointer.x()
y = center_pointer.y()
old_x,oldy, width, height = self.frameGeometry().getRect()
self.move(int(x - width / 2), int(y - height / 2))
# 添加视频子界面
self.VideoInterface = VideoInterface(self)
self.addSubInterface(self.VideoInterface, FluentIcon.VIDEO, '视频')
# 添加音频子界面
self.AudioInterface = AudioInterface(self)
self.addSubInterface(self.AudioInterface, FluentIcon.MUSIC, '音频')
# 添加通用子界面
| # coding:utf-8
class MihiroToolBox(SplitFluentWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('MihiroToolBox')
self.setWindowIcon(QIcon('img/logo.png'))
# 设置默认大小
self.resize(800,800)
# 调整窗口在屏幕中央显示
center_pointer = QDesktopWidget().availableGeometry().center()
x = center_pointer.x()
y = center_pointer.y()
old_x,oldy, width, height = self.frameGeometry().getRect()
self.move(int(x - width / 2), int(y - height / 2))
# 添加视频子界面
self.VideoInterface = VideoInterface(self)
self.addSubInterface(self.VideoInterface, FluentIcon.VIDEO, '视频')
# 添加音频子界面
self.AudioInterface = AudioInterface(self)
self.addSubInterface(self.AudioInterface, FluentIcon.MUSIC, '音频')
# 添加通用子界面 | self.CommonInterface = CommonInterface(self) | 2 | 2023-10-25 05:04:58+00:00 | 12k |
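The gold next_line of this record, `self.CommonInterface = CommonInterface(self)` (gold_snippet_index 2, the `CommonInterface` snippet), simply extends the sub-interface registration pattern already visible in `__init__`. Below is a hedged sketch of how the remaining interfaces imported by this file might be wired up; the icon choices and the bottom placement of the settings page via `NavigationItemPosition.BOTTOM` are assumptions, not the repository's verified code.

# Hedged sketch continuing MihiroToolBox.__init__ (icons/positions are guesses).
self.CommonInterface = CommonInterface(self)  # gold next_line of this record
self.addSubInterface(self.CommonInterface, FluentIcon.APPLICATION, '通用')

self.PackageInterface = PackageInterface(self)
self.addSubInterface(self.PackageInterface, FluentIcon.FOLDER, '封装')

self.SettingInterface = SettingInterface(self)
self.addSubInterface(
    self.SettingInterface, FluentIcon.SETTING, '设置',
    position=NavigationItemPosition.BOTTOM,
)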
RenShuhuai-Andy/TESTA | testa/patch/timesformer.py | [
{
"identifier": "Attention",
"path": "models/timesformer/models/vit.py",
"snippet": "class Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., with_qkv=True):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n self.with_qkv = with_qkv\n if self.with_qkv:\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.attn_drop = nn.Dropout(attn_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n if self.with_qkv:\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n else:\n qkv = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n q, k, v = qkv, qkv, qkv\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n if self.with_qkv:\n x = self.proj(x)\n x = self.proj_drop(x)\n return x"
},
{
"identifier": "Block",
"path": "models/timesformer/models/vit.py",
"snippet": "class Block(nn.Module):\n\n def __init__(self, dim, num_heads, layer_num, num_frm, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0.1, act_layer=nn.GELU, norm_layer=nn.LayerNorm, attention_type='divided_space_time',\n learnable_temporal_scaling=False, use_grad_checkpointing=False):\n super().__init__()\n self.attention_type = attention_type\n assert(attention_type in ['divided_space_time', 'space_only','joint_space_time'])\n\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n\n ## Temporal Attention Parameters\n if self.attention_type == 'divided_space_time':\n self.temporal_norm1 = norm_layer(dim)\n self.temporal_attn = Attention(\n dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n self.temporal_fc = nn.Linear(dim, dim)\n torch.nn.init.constant_(self.temporal_fc.weight, 0)\n torch.nn.init.constant_(self.temporal_fc.bias, 0)\n\n ## drop path\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n self.layer_num = layer_num\n self.num_frm = num_frm\n self.learnable_temporal_scaling = learnable_temporal_scaling\n\n if self.learnable_temporal_scaling == True and self.attention_type != 'space_only':\n self.temporal_scaling = nn.Parameter(torch.zeros(1, self.num_frm, 1))\n\n if use_grad_checkpointing:\n if self.attention_type == 'divided_space_time':\n self.temporal_attn = checkpoint_wrapper(self.temporal_attn)\n self.temporal_fc = checkpoint_wrapper(self.temporal_fc)\n self.attn = checkpoint_wrapper(self.attn)\n self.mlp = checkpoint_wrapper(self.mlp)\n\n\n def forward(self, x, B, T, W):\n num_spatial_tokens = (x.size(1) - 1) // T\n H = num_spatial_tokens // W\n\n if self.attention_type in ['space_only', 'joint_space_time']:\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n elif self.attention_type == 'divided_space_time':\n ## Temporal\n xt = x[:,1:,:]\n xt = rearrange(xt, 'b (h w t) m -> (b h w) t m',b=B,h=H,w=W,t=T)\n if self.learnable_temporal_scaling == False:\n res_temporal = self.drop_path(self.temporal_attn(self.temporal_norm1(xt)))\n else:\n res_temporal = self.drop_path(self.temporal_attn(self.temporal_norm1(xt))*(torch.tanh(self.temporal_scaling)+1))\n res_temporal = rearrange(res_temporal, '(b h w) t m -> b (h w t) m',b=B,h=H,w=W,t=T)\n res_temporal = self.temporal_fc(res_temporal)\n xt = x[:,1:,:] + res_temporal\n\n ## Spatial\n init_cls_token = x[:,0,:].unsqueeze(1)\n cls_token = init_cls_token.repeat(1, T, 1)\n cls_token = rearrange(cls_token, 'b t m -> (b t) m',b=B,t=T).unsqueeze(1)\n xs = xt\n xs = rearrange(xs, 'b (h w t) m -> (b t) (h w) m',b=B,h=H,w=W,t=T)\n xs = torch.cat((cls_token, xs), 1)\n res_spatial = self.drop_path(self.attn(self.norm1(xs)))\n\n ### Taking care of CLS token\n cls_token = res_spatial[:,0,:]\n cls_token = rearrange(cls_token, '(b t) m -> b t m',b=B,t=T)\n cls_token = torch.mean(cls_token,1,True) ## averaging for every frame\n res_spatial = res_spatial[:,1:,:]\n res_spatial = rearrange(res_spatial, '(b t) (h w) m -> b (h w t) m',b=B,h=H,w=W,t=T)\n res = res_spatial\n x = xt\n\n ## Mlp\n x = torch.cat((init_cls_token, x), 1) + torch.cat((cls_token, res), 1)\n x = x + 
self.drop_path(self.mlp(self.norm2(x)))\n return x"
},
{
"identifier": "VisionTransformer",
"path": "models/timesformer/models/vit.py",
"snippet": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformere\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,\n drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, num_frames=8, \n attention_type='divided_space_time', dropout=0., learnable_temporal_scaling=False,\n use_grad_checkpointing=False, ckpt_layer=0):\n super().__init__()\n self.attention_type = attention_type\n self.depth = depth\n self.dropout = nn.Dropout(dropout)\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n ## Positional Embeddings\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches+1, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n if self.attention_type != 'space_only':\n self.time_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))\n self.time_drop = nn.Dropout(p=drop_rate)\n\n ## Attention Blocks\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, self.depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n Block(\n layer_num=i, num_frm = num_frames,\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, \n attention_type=self.attention_type, learnable_temporal_scaling=learnable_temporal_scaling,\n use_grad_checkpointing=(use_grad_checkpointing and i >= self.depth-ckpt_layer))\n for i in range(self.depth)])\n self.norm = norm_layer(embed_dim)\n\n # Classifier head\n self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n ## initialization of temporal attention weights\n if self.attention_type == 'divided_space_time':\n i = 0\n for m in self.blocks.modules():\n m_str = str(m)\n if 'Block' in m_str:\n if i > 0:\n nn.init.constant_(m.temporal_fc.weight, 0)\n nn.init.constant_(m.temporal_fc.bias, 0)\n i += 1\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token', 'time_embed'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x, get_all_tokens=True):\n B = x.shape[0]\n x, T, W = self.patch_embed(x)\n cls_tokens = self.cls_token.expand(x.size(0), -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n\n ## resizing the positional embeddings in case they don't match the input at inference\n if x.size(1) != self.pos_embed.size(1):\n pos_embed = self.pos_embed\n cls_pos_embed = pos_embed[0,0,:].unsqueeze(0).unsqueeze(1)\n other_pos_embed = pos_embed[0,1:,:].unsqueeze(0).transpose(1, 2)\n P = 
int(other_pos_embed.size(2) ** 0.5)\n H = x.size(1) // W\n other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)\n new_pos_embed = F.interpolate(other_pos_embed, size=(H, W), mode='nearest')\n new_pos_embed = new_pos_embed.flatten(2)\n new_pos_embed = new_pos_embed.transpose(1, 2)\n new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)\n x = x + new_pos_embed\n else:\n x = x + self.pos_embed\n x = self.pos_drop(x)\n\n\n ## Time Embeddings\n if self.attention_type != 'space_only':\n cls_tokens = x[:B, 0, :].unsqueeze(1)\n x = x[:,1:]\n x = rearrange(x, '(b t) n m -> (b n) t m',b=B,t=T)\n ## Resizing time embeddings in case they don't match\n if T != self.time_embed.size(1):\n time_embed = self.time_embed.transpose(1, 2)\n new_time_embed = F.interpolate(time_embed, size=(T), mode='nearest')\n new_time_embed = new_time_embed.transpose(1, 2)\n x = x + new_time_embed\n else:\n x = x + self.time_embed\n x = self.time_drop(x)\n x = rearrange(x, '(b n) t m -> b (n t) m',b=B,t=T)\n x = torch.cat((cls_tokens, x), dim=1)\n\n ## Attention blocks\n for blk in self.blocks:\n x = blk(x, B, T, W)\n\n ### Predictions for space-only baseline\n if self.attention_type == 'space_only':\n x = rearrange(x, '(b t) n m -> b t n m',b=B,t=T)\n if get_all_tokens == False:\n x = torch.mean(x, 1) # averaging predictions for every frame\n else:\n x = self.norm(x)\n x = rearrange(x, 'b t n m -> b (t n) m',b=B,t=T) # concating tokens of every frame\n return x\n x = self.norm(x)\n if get_all_tokens == False:\n return x[:0]\n else:\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x"
},
{
"identifier": "bipartite_soft_matching",
"path": "testa/merge.py",
"snippet": "def bipartite_soft_matching(\n metric: torch.Tensor,\n r: int,\n class_token: bool = False,\n distill_token: bool = False,\n merging_type: str = 'patch'\n) -> Tuple[Callable, Callable]:\n \"\"\"\n Applies TESTA with a balanced matching set (50%, 50%).\n\n Input size is [batch, tokens, channels].\n r indicates the number of tokens to remove (max 50% of tokens).\n\n Extra args:\n - class_token: Whether or not there's a class token.\n - distill_token: Whether or not there's also a distillation token.\n\n When enabled, the class token and distillation tokens won't get merged.\n \"\"\"\n protected = 0\n if class_token:\n protected += 1\n if distill_token:\n protected += 1\n\n # We can only reduce by a maximum of 50% tokens\n t = metric.shape[-2] # dimension for reduction\n r = min(r, (t - protected) // 2)\n\n if r <= 0:\n return do_nothing, do_nothing\n\n with torch.no_grad():\n metric = metric / metric.norm(dim=-1, keepdim=True)\n a, b = metric[..., ::2, :], metric[..., 1::2, :]\n scores = a @ b.transpose(-1, -2) # |Set A| * |Set B| edges\n\n if merging_type == 'patch' and class_token:\n scores[..., 0, :] = -math.inf\n if merging_type == 'patch' and distill_token:\n scores[..., :, 0] = -math.inf\n\n node_max, node_idx = scores.max(dim=-1) # keep edge with the highest sim for every node in Set A\n\n if merging_type == 'frame': # aggregate frames based on patch voting\n n = metric.size(-3) # number of patches\n node_idx, _ = node_idx.mode(dim=-2, keepdim=True)\n node_idx = node_idx.repeat(1, n, 1)\n node_max, _ = node_max.mode(dim=-2, keepdim=True)\n node_max = node_max.repeat(1, n, 1)\n\n edge_idx = node_max.argsort(dim=-1, descending=True)[..., None] # sort |Set A| edges based on sim\n\n unm_idx = edge_idx[..., r:, :] # Unmerged Tokens\n src_idx = edge_idx[..., :r, :] # Merged Tokens\n dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx) # node_idx: idx for Set B\n\n if class_token or merging_type == 'frame':\n # Sort to ensure the class token is at the start\n unm_idx = unm_idx.sort(dim=-2)[0]\n\n def merge(x: torch.Tensor, mode=\"mean\") -> torch.Tensor:\n src, dst = x[..., ::2, :], x[..., 1::2, :]\n B, n, t1, c = src.shape\n unm = src.gather(dim=-2, index=unm_idx.expand(B, n, t1 - r, c))\n src = src.gather(dim=-2, index=src_idx.expand(B, n, r, c))\n dst = dst.scatter_reduce(-2, dst_idx.expand(B, n, r, c), src, reduce=mode)\n\n if distill_token:\n return torch.cat([unm[:, :1], dst[:, :1], unm[:, 1:], dst[:, 1:]], dim=1)\n else:\n return torch.cat([unm, dst], dim=-2)\n\n def unmerge(x: torch.Tensor) -> torch.Tensor:\n unm_len = unm_idx.shape[-2]\n unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]\n B, n, _, c = unm.shape\n\n src = dst.gather(dim=-2, index=dst_idx.expand(B, n, r, c))\n\n out = torch.zeros(B, n, metric.shape[1], c, device=x.device, dtype=x.dtype)\n\n out[..., 1::2, :] = dst\n out.scatter_(dim=-2, index=(2 * unm_idx).expand(B, n, unm_len, c), src=unm)\n out.scatter_(dim=-2, index=(2 * src_idx).expand(B, n, r, c), src=src)\n\n return out\n\n return merge, unmerge"
},
{
"identifier": "merge_source",
"path": "testa/merge.py",
"snippet": "def merge_source(\n merge: Callable, x: torch.Tensor, source: torch.Tensor = None\n) -> torch.Tensor:\n \"\"\"\n For source tracking. Source is an adjacency matrix between the initial tokens and final merged groups.\n x is used to find out how many tokens there are in case the source is None.\n \"\"\"\n if source is None:\n B, n, t, _ = x.shape\n source = torch.eye(t, device=x.device)[None, ...].expand(B, n, t, t)\n\n source = merge(source, mode=\"amax\")\n return source"
},
{
"identifier": "merge_wavg",
"path": "testa/merge.py",
"snippet": "def merge_wavg(\n merge: Callable, x: torch.Tensor, size: torch.Tensor = None\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Applies the merge function by taking a weighted average based on token size.\n Returns the merged tensor and the new token sizes.\n \"\"\"\n if size is None:\n size = torch.ones_like(x[..., 0, None])\n\n x = merge(x * size, mode=\"sum\")\n size = merge(size, mode=\"sum\")\n\n x = x / size\n return x, size"
},
{
"identifier": "original_bipartite_soft_matching",
"path": "testa/merge_original.py",
"snippet": "def original_bipartite_soft_matching(\n metric: torch.Tensor,\n r: int,\n class_token: bool = False,\n distill_token: bool = False,\n) -> Tuple[Callable, Callable]:\n \"\"\"\n Applies ToMe with a balanced matching set (50%, 50%).\n\n Input size is [batch, tokens, channels].\n r indicates the number of tokens to remove (max 50% of tokens).\n\n Extra args:\n - class_token: Whether or not there's a class token.\n - distill_token: Whether or not there's also a distillation token.\n\n When enabled, the class token and distillation tokens won't get merged.\n \"\"\"\n protected = 0\n if class_token:\n protected += 1\n if distill_token:\n protected += 1\n\n # We can only reduce by a maximum of 50% tokens\n t = metric.shape[1]\n r = min(r, (t - protected) // 2)\n\n if r <= 0:\n return do_nothing, do_nothing\n\n with torch.no_grad():\n metric = metric / metric.norm(dim=-1, keepdim=True)\n a, b = metric[..., ::2, :], metric[..., 1::2, :]\n scores = a @ b.transpose(-1, -2)\n\n if class_token:\n scores[..., 0, :] = -math.inf\n if distill_token:\n scores[..., :, 0] = -math.inf\n\n node_max, node_idx = scores.max(dim=-1)\n edge_idx = node_max.argsort(dim=-1, descending=True)[..., None]\n\n unm_idx = edge_idx[..., r:, :] # Unmerged Tokens\n src_idx = edge_idx[..., :r, :] # Merged Tokens\n dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx)\n\n if class_token:\n # Sort to ensure the class token is at the start\n unm_idx = unm_idx.sort(dim=1)[0]\n\n def merge(x: torch.Tensor, mode=\"mean\") -> torch.Tensor:\n src, dst = x[..., ::2, :], x[..., 1::2, :]\n n, t1, c = src.shape\n unm = src.gather(dim=-2, index=unm_idx.expand(n, t1 - r, c))\n src = src.gather(dim=-2, index=src_idx.expand(n, r, c))\n dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode)\n\n if distill_token:\n return torch.cat([unm[:, :1], dst[:, :1], unm[:, 1:], dst[:, 1:]], dim=1)\n else:\n return torch.cat([unm, dst], dim=1)\n\n def unmerge(x: torch.Tensor) -> torch.Tensor:\n unm_len = unm_idx.shape[1]\n unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]\n n, _, c = unm.shape\n\n src = dst.gather(dim=-2, index=dst_idx.expand(n, r, c))\n\n out = torch.zeros(n, metric.shape[1], c, device=x.device, dtype=x.dtype)\n\n out[..., 1::2, :] = dst\n out.scatter_(dim=-2, index=(2 * unm_idx).expand(n, unm_len, c), src=unm)\n out.scatter_(dim=-2, index=(2 * src_idx).expand(n, r, c), src=src)\n\n return out\n\n return merge, unmerge"
},
{
"identifier": "original_merge_wavg",
"path": "testa/merge_original.py",
"snippet": "def original_merge_wavg(\n merge: Callable, x: torch.Tensor, size: torch.Tensor = None\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Applies the merge function by taking a weighted average based on token size.\n Returns the merged tensor and the new token sizes.\n \"\"\"\n if size is None:\n size = torch.ones_like(x[..., 0, None])\n\n x = merge(x * size, mode=\"sum\")\n size = merge(size, mode=\"sum\")\n\n x = x / size\n return x, size"
},
{
"identifier": "parse_r",
"path": "testa/utils.py",
"snippet": "def parse_r(num_layers: int, r: Union[List[int], Tuple[int, float], int]) -> List[int]:\n \"\"\"\n Process a constant r or r schedule into a list for use internally.\n\n r can take the following forms:\n - int: A constant number of tokens per layer.\n - Tuple[int, float]: A pair of r, inflection.\n Inflection describes there the the reduction / layer should trend\n upward (+1), downward (-1), or stay constant (0). A value of (r, 0)\n is as providing a constant r. (r, -1) is what we describe in the paper\n as \"decreasing schedule\". Any value between -1 and +1 is accepted.\n - List[int]: A specific number of tokens per layer. For extreme granularity.\n \"\"\"\n inflect = 0\n if isinstance(r, list):\n if len(r) < num_layers:\n r = r + [0] * (num_layers - len(r))\n return list(r)\n elif isinstance(r, tuple):\n r, inflect = r\n\n min_val = int(r * (1.0 - inflect))\n max_val = 2 * r - min_val\n step = (max_val - min_val) / (num_layers - 1)\n\n return [int(min_val + step * i) for i in range(num_layers)]"
},
{
"identifier": "parse_merging_type",
"path": "testa/utils.py",
"snippet": "def parse_merging_type(num_layers: int, merging_type: Union[List[str], str]) -> List[str]:\n if isinstance(merging_type, str):\n if merging_type == 'patch' or merging_type == 'frame' or merging_type == 'frame&patch':\n return [merging_type for _ in range(num_layers)]\n elif merging_type == 'patch-frame' or merging_type == 'frame-patch':\n return [merging_type.split('-')[0], merging_type.split('-')[1]] * (num_layers // 2)\n else:\n return merging_type"
}
] | from typing import Tuple
from models.timesformer.models.vit import Attention, Block, VisionTransformer
from einops import rearrange
from testa.merge import bipartite_soft_matching, merge_source, merge_wavg
from testa.merge_original import original_bipartite_soft_matching, original_merge_wavg
from testa.utils import parse_r, parse_merging_type
import torch.nn.functional as F
import torch | 9,316 | if merging_type == 'patch':
x = rearrange(x, "b t l d -> (b t) l d", b=B)
else: # merging_type == 'frame'
self._testa_info["size"] = self._testa_info["size"].permute(0, 2, 1, 3)
size_cls = torch.ones(B, self._testa_info["size"].size(1), 1, 1).to(self._testa_info["size"])
self._testa_info["size"] = torch.cat([size_cls, self._testa_info["size"]], dim=-2) # add cls
x = rearrange(x, "b l t d -> b (l t) d", l=L)
return x
class TESTAAttention(Attention):
"""
Modifications:
- Apply proportional attention
- Return the mean of k over heads from attention
"""
def forward(
self, x: torch.Tensor, size: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
# Note: this is copied from timm.models.vision_transformer.Attention with modifications.
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
# Apply proportional attention
if size is not None:
attn = attn + size.log()[:, None, None, :, 0]
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
# Return k as well here
return x, k.mean(1)
def make_testa_class(transformer_class):
class TESTAVisionTransformer(transformer_class):
"""
Modifications:
- Initialize r, token size, and token sources.
"""
def forward_features(self, x, get_all_tokens=True):
B = x.shape[0]
x, T, W = self.patch_embed(x)
cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# resizing the positional embeddings in case they don't match the input at inference
if x.size(1) != self.pos_embed.size(1):
pos_embed = self.pos_embed
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
P = int(other_pos_embed.size(2) ** 0.5)
H = x.size(1) // W
other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)
new_pos_embed = F.interpolate(other_pos_embed, size=(H, W), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
x = self.pos_drop(x)
# Time Embeddings
if self.attention_type != 'space_only':
cls_tokens = x[:B, 0, :].unsqueeze(1)
x = x[:, 1:]
x = rearrange(x, '(b t) l d -> (b l) t d', b=B, t=T)
# Resizing time embeddings in case they don't match
if T != self.time_embed.size(1):
time_embed = self.time_embed.transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(T), mode='nearest')
new_time_embed = new_time_embed.transpose(1, 2)
x = x + new_time_embed
else:
x = x + self.time_embed
x = self.time_drop(x)
x = rearrange(x, '(b l) t d -> b (l t) d', b=B, t=T)
x = torch.cat((cls_tokens, x), dim=1)
# Attention blocks
L = (x.size(1) - 1) // T
for blk in self.blocks:
x, T, L = blk(x, B, T, L)
# Predictions for space-only baseline
if self.attention_type == 'space_only':
x = rearrange(x, '(b t) l d -> b t l d', b=B, t=T)
if get_all_tokens is False:
x = torch.mean(x, 1) # averaging predictions for every frame
else:
x = self.norm(x)
x = rearrange(x, 'b t l d -> b (t l) d', b=B, t=T) # concat tokens of every frame
return x
x = self.norm(x)
if get_all_tokens is False:
return x[:0]
else:
return x
def forward(self, *args, **kwdargs) -> torch.Tensor:
r = self.r.copy() if isinstance(self.r, list) else self.r
merging_type = self.merging_type.copy() if isinstance(self.merging_type, list) else self.merging_type
| '''
Adapted from https://github.com/facebookresearch/ToMe
'''
class TESTABlock(Block):
"""
Modifications:
- Apply TESTA between the attention and mlp blocks
- Compute and propagate token size and potentially the token sources.
"""
def _drop_path1(self, x):
return self.drop_path1(x) if hasattr(self, "drop_path1") else self.drop_path(x)
def _drop_path2(self, x):
return self.drop_path2(x) if hasattr(self, "drop_path2") else self.drop_path(x)
def forward(self, x: torch.Tensor, B, T, L) -> torch.Tensor:
"""
x: [bsz, 1+seq_len*n_frm, dim] for video
"""
attn_size = self._testa_info["size"] if self._testa_info["prop_attn"] else None
merging_type = self._testa_info["merging_type"].pop(0)
if self.attention_type in ['space_only', 'joint_space_time']:
x = self.global_agg(x)
elif self.attention_type == 'divided_space_time':
# Temporal
xt = x[:, 1:, :] # [B, LxT, D]
xt = rearrange(xt, 'b (l t) d -> (b l) t d', b=B, l=L, t=T)
xt_attn, metric_t = self.temporal_attn(self.temporal_norm1(xt))
if self.learnable_temporal_scaling is False:
res_temporal = self.drop_path(xt_attn)
else:
res_temporal = self.drop_path(xt_attn * (torch.tanh(self.temporal_scaling) + 1))
res_temporal = rearrange(res_temporal, '(b l) t d -> b (l t) d', b=B, l=L, t=T)
res_temporal = self.temporal_fc(res_temporal)
xt = x[:, 1:, :] + res_temporal
if 'frame' in merging_type:
xt = self.testa(xt, metric_t, B, L, 'frame')
# reconstruct
T = xt.size(1) // L
# Spatial
init_cls_token = x[:, 0, :].unsqueeze(1) # [B, 1, D]
cls_token = init_cls_token.repeat(1, T, 1) # [B, T, D]
cls_token = rearrange(cls_token, 'b t d -> (b t) d', b=B, t=T).unsqueeze(1) # [BxT, 1, D]
xs = xt # [B, LxT, D]
xs = rearrange(xs, 'b (l t) d -> (b t) l d', b=B, l=L, t=T)
xs = torch.cat((cls_token, xs), 1) # [BxT, 1+L, D]
x_attn, metric_s = self.attn(self.norm1(xs), attn_size) # cal metric for TESTA
res_spatial = self.drop_path(x_attn)
# Taking care of CLS token
cls_token = res_spatial[:, 0, :] # [BxT, 1, D]
cls_token = rearrange(cls_token, '(b t) d -> b t d', b=B, t=T) # [B, T, D]
cls_token = torch.mean(cls_token, 1, True) # averaging for every frame [B, 1, D]
res_spatial = res_spatial[:, 1:, :] # [BxT, L, D]
res_spatial = rearrange(res_spatial, '(b t) l d -> b (l t) d', b=B, l=L, t=T)
res = res_spatial # [B, LxT, D]
x = xt # [B, LxT, D], feature before spatial attn
# Mlp
x = rearrange((x + res), 'b (l t) d -> (b t) l d', b=B, l=L, t=T) # [BxT, L, D]
final_cls = init_cls_token + cls_token
x = torch.cat((final_cls.repeat(x.size(0) // final_cls.size(0), 1, 1), x), 1)
if 'patch' in merging_type:
x = self.testa(x, metric_s, B, L, 'patch')[:, 1:, :] # exclude [cls]
# reconstruct
L = x.size(1)
x = rearrange(x, '(b t) l d -> b (l t) d', b=B, l=L, t=T)
x = torch.cat((final_cls, x), 1)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, T, L
def global_agg(self, x: torch.Tensor) -> torch.Tensor:
"""
Global aggregation of all patches in all frames
"""
# Note: this is copied from timm.models.vision_transformer.Block with modifications.
attn_size = self._testa_info["size"] if self._testa_info["prop_attn"] else None
x_attn, metric = self.attn(self.norm1(x), attn_size)
x = x + self._drop_path1(x_attn)
r = self._testa_info["r"].pop(0)
if r > 0:
# Apply ToMe here
merge, _ = original_bipartite_soft_matching(
metric,
r,
self._testa_info["class_token"],
self._testa_info["distill_token"],
)
if self._testa_info["trace_source"]:
self._testa_info["source"] = merge_source(
merge, x, self._testa_info["source"]
)
x, self._testa_info["size"] = original_merge_wavg(merge, x, self._testa_info["size"])
x = x + self._drop_path2(self.mlp(self.norm2(x)))
return x
def testa(self, x, metric, B, L, merging_type):
r = self._testa_info["r"].pop(0)
if r > 0:
if merging_type == 'patch':
x = rearrange(x, "(b t) l d -> b t l d", b=B)
metric = rearrange(metric, "(b t) l d -> b t l d", b=B)
else: # merging_type == 'frame'
x = rearrange(x, "b (l t) d -> b l t d", l=L)
metric = rearrange(metric, "(b l) t d -> b l t d", b=B)
if self._testa_info["size"] is not None:
# by default, the size of self._testa_info["size"] is [b, t, l, d]
self._testa_info["size"] = self._testa_info["size"].permute(0, 2, 1, 3)
self._testa_info["size"] = self._testa_info["size"][:, 1:, ...] # remove cls
# Apply TESTA here
merge, _ = bipartite_soft_matching(
metric,
r,
self._testa_info["class_token"],
self._testa_info["distill_token"],
merging_type,
)
if self._testa_info["trace_source"]:
self._testa_info["source"] = merge_source(
merge, x, self._testa_info["source"]
)
x, self._testa_info["size"] = merge_wavg(merge, x, self._testa_info["size"])
if merging_type == 'patch':
x = rearrange(x, "b t l d -> (b t) l d", b=B)
else: # merging_type == 'frame'
self._testa_info["size"] = self._testa_info["size"].permute(0, 2, 1, 3)
size_cls = torch.ones(B, self._testa_info["size"].size(1), 1, 1).to(self._testa_info["size"])
self._testa_info["size"] = torch.cat([size_cls, self._testa_info["size"]], dim=-2) # add cls
x = rearrange(x, "b l t d -> b (l t) d", l=L)
return x
class TESTAAttention(Attention):
"""
Modifications:
- Apply proportional attention
- Return the mean of k over heads from attention
"""
def forward(
self, x: torch.Tensor, size: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
# Note: this is copied from timm.models.vision_transformer.Attention with modifications.
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
# Apply proportional attention
if size is not None:
attn = attn + size.log()[:, None, None, :, 0]
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
# Return k as well here
return x, k.mean(1)
def make_testa_class(transformer_class):
class TESTAVisionTransformer(transformer_class):
"""
Modifications:
- Initialize r, token size, and token sources.
"""
def forward_features(self, x, get_all_tokens=True):
B = x.shape[0]
x, T, W = self.patch_embed(x)
cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# resizing the positional embeddings in case they don't match the input at inference
if x.size(1) != self.pos_embed.size(1):
pos_embed = self.pos_embed
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
P = int(other_pos_embed.size(2) ** 0.5)
H = x.size(1) // W
other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)
new_pos_embed = F.interpolate(other_pos_embed, size=(H, W), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
x = self.pos_drop(x)
# Time Embeddings
if self.attention_type != 'space_only':
cls_tokens = x[:B, 0, :].unsqueeze(1)
x = x[:, 1:]
x = rearrange(x, '(b t) l d -> (b l) t d', b=B, t=T)
# Resizing time embeddings in case they don't match
if T != self.time_embed.size(1):
time_embed = self.time_embed.transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(T), mode='nearest')
new_time_embed = new_time_embed.transpose(1, 2)
x = x + new_time_embed
else:
x = x + self.time_embed
x = self.time_drop(x)
x = rearrange(x, '(b l) t d -> b (l t) d', b=B, t=T)
x = torch.cat((cls_tokens, x), dim=1)
# Attention blocks
L = (x.size(1) - 1) // T
for blk in self.blocks:
x, T, L = blk(x, B, T, L)
# Predictions for space-only baseline
if self.attention_type == 'space_only':
x = rearrange(x, '(b t) l d -> b t l d', b=B, t=T)
if get_all_tokens is False:
x = torch.mean(x, 1) # averaging predictions for every frame
else:
x = self.norm(x)
x = rearrange(x, 'b t l d -> b (t l) d', b=B, t=T) # concat tokens of every frame
return x
x = self.norm(x)
if get_all_tokens is False:
return x[:0]
else:
return x
def forward(self, *args, **kwdargs) -> torch.Tensor:
r = self.r.copy() if isinstance(self.r, list) else self.r
merging_type = self.merging_type.copy() if isinstance(self.merging_type, list) else self.merging_type | self._testa_info["r"] = parse_r(len(self.blocks), r) | 8 | 2023-10-29 12:09:38+00:00 | 12k |
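The record above pairs the TESTA/ToMe merging utilities (`bipartite_soft_matching`, `merge_wavg`, `parse_r`) with the TimeSformer blocks that call them. As a quick illustration of what the matching step does, here is a minimal, self-contained PyTorch sketch of balanced bipartite soft matching on a [batch, tokens, channels] tensor. It is a simplified re-derivation, not code from the repository: the function name `toy_bipartite_merge` is hypothetical, class/distill-token protection is dropped, and tokens are merged with a plain mean rather than the size-weighted average of `merge_wavg`.

import torch

def toy_bipartite_merge(x: torch.Tensor, r: int) -> torch.Tensor:
    # x: [batch, tokens, channels]; r: number of tokens to remove (at most tokens // 2).
    t = x.shape[1]
    r = min(r, t // 2)
    if r <= 0:
        return x
    metric = x / x.norm(dim=-1, keepdim=True)          # cosine-normalised features
    a, b = metric[:, ::2, :], metric[:, 1::2, :]       # alternate tokens into sets A and B
    scores = a @ b.transpose(-1, -2)                   # similarity of every A token to every B token
    node_max, node_idx = scores.max(dim=-1)            # best partner in B for each A token
    edge_idx = node_max.argsort(dim=-1, descending=True)[..., None]
    unm_idx = edge_idx[:, r:, :]                       # A tokens kept unmerged
    src_idx = edge_idx[:, :r, :]                       # A tokens merged away
    dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx)

    src_tok, dst_tok = x[:, ::2, :], x[:, 1::2, :]
    n, t_a, c = src_tok.shape
    unm = src_tok.gather(dim=-2, index=unm_idx.expand(n, t_a - r, c))
    src = src_tok.gather(dim=-2, index=src_idx.expand(n, r, c))
    dst = dst_tok.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce="mean")
    return torch.cat([unm, dst], dim=1)                # token count reduced by r

# Toy usage: one sequence of 8 tokens with 4 channels, remove 2 tokens.
x = torch.randn(1, 8, 4)
print(toy_bipartite_merge(x, r=2).shape)               # torch.Size([1, 6, 4])

The repository's `merge_wavg` additionally carries a per-token `size` tensor so that repeated merges stay a weighted average of the original tokens; the plain mean above is used only for readability.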
OATML-Markslab/ProteinNPT | utils/esm/modules.py | [
{
"identifier": "MultiheadAttention",
"path": "utils/esm/multihead_attention.py",
"snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv: bool = False,\n add_zero_attn: bool = False,\n self_attention: bool = False,\n encoder_decoder_attention: bool = False,\n use_rotary_embeddings: bool = False,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim**-0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, (\n \"Self-attention requires query, key and \" \"value to be of the same size\"\n )\n\n self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.rot_emb = None\n if use_rotary_embeddings:\n self.rot_emb = RotaryEmbedding(dim=self.head_dim)\n\n self.enable_torch_version = False\n if hasattr(F, \"multi_head_attention_forward\"):\n self.enable_torch_version = True\n else:\n self.enable_torch_version = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.0)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement 
causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if (\n not self.rot_emb\n and self.enable_torch_version\n and not self.onnx_trace\n and incremental_state is None\n and not static_kv\n # A workaround for quantization to work. Otherwise JIT compilation\n # treats bias in linear module as method.\n and not torch.jit.is_scripting()\n and not need_head_weights\n ):\n assert key is not None and value is not None\n return F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n if self.self_attention:\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n q *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n 
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask),\n ],\n dim=1,\n )\n\n if self.rot_emb:\n q, k = self.rot_emb(q, k)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float(\"-inf\")\n )\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n\n attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace)\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = F.dropout(\n attn_weights_float.type_as(attn_weights),\n p=self.dropout,\n training=self.training,\n )\n assert v is not None\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, 
self.num_heads, tgt_len, src_len\n ).type_as(attn).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n\n return attn, attn_weights\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(\n 0\n ):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = 
state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value"
},
{
"identifier": "ColumnSelfAttention",
"path": "utils/esm/axial_attention.py",
"snippet": "class ColumnSelfAttention(nn.Module):\n \"\"\"Compute self-attention over columns of a 2D input.\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n max_tokens_per_msa: int = 2 ** 16,\n ):\n super().__init__()\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.scaling = self.head_dim ** -0.5\n self.max_tokens_per_msa = max_tokens_per_msa\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n self.dropout_module = nn.Dropout(dropout)\n\n def _batched_forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n max_cols = max(1, self.max_tokens_per_msa // num_rows)\n outputs = []\n attns = []\n for start in range(0, num_cols, max_cols):\n output, attn = self(\n x[:, start : start + max_cols],\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask[:, :, start : start + max_cols]\n if self_attn_padding_mask is not None\n else None,\n )\n outputs.append(output)\n attns.append(attn)\n output = torch.cat(outputs, 1)\n attns = torch.cat(attns, 1)\n return output, attns\n\n def compute_attention_update(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n if num_rows == 1:\n # if there is only 1 position, this is equivalent and doesn't break with padding\n attn_probs = torch.ones(\n self.num_heads,\n num_cols,\n batch_size,\n num_rows,\n num_rows,\n device=x.device,\n dtype=x.dtype,\n )\n output = self.out_proj(self.v_proj(x))\n else:\n q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n q *= self.scaling\n\n attn_weights = torch.einsum(\"icnhd,jcnhd->hcnij\", q, k)\n\n if self_attn_mask is not None:\n raise NotImplementedError\n if self_attn_padding_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_padding_mask.permute(2, 0, 1).unsqueeze(0).unsqueeze(3),\n -10000,\n )\n\n attn_probs = attn_weights.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n context = torch.einsum(\"hcnij,jcnhd->icnhd\", attn_probs, v)\n context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)\n output = self.out_proj(context)\n return output, attn_probs\n\n def forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n # if False and num_rows * num_cols > 2 ** 14 and not torch.is_grad_enabled():\n if (num_rows * num_cols) > self.max_tokens_per_msa and not torch.is_grad_enabled():\n return self._batched_forward(\n x,\n self_attn_mask,\n self_attn_padding_mask,\n )\n else:\n return self.compute_attention_update(x, self_attn_mask, self_attn_padding_mask)"
},
{
"identifier": "RowSelfAttention",
"path": "utils/esm/axial_attention.py",
"snippet": "class RowSelfAttention(nn.Module):\n \"\"\"Compute self-attention over rows of a 2D input.\"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n max_tokens_per_msa: int = 2 ** 16,\n tranception_attention: bool = False,\n num_targets: int = 1,\n ):\n super().__init__()\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n self.scaling = self.head_dim ** -0.5\n self.max_tokens_per_msa = max_tokens_per_msa\n self.attn_shape = \"hnij\"\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n self.dropout_module = nn.Dropout(dropout)\n \n self.tranception_attention = tranception_attention\n self.num_targets = num_targets\n if self.tranception_attention:\n assert self.num_heads%4==0, \"Invalid number of heads. Tranception requires the number of heads to be a multiple of 4.\"\n self.num_heads_per_kernel_size = self.num_heads // 4\n self.query_depthwiseconv = nn.ModuleDict()\n self.key_depthwiseconv = nn.ModuleDict()\n self.value_depthwiseconv = nn.ModuleDict()\n for kernel_idx, kernel in enumerate([3,5,7]):\n self.query_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel,self.num_targets)\n self.key_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel,self.num_targets)\n self.value_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel,self.num_targets)\n\n def align_scaling(self, q):\n num_rows = q.size(0)\n return self.scaling / math.sqrt(num_rows)\n\n def _batched_forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n max_rows = max(1, self.max_tokens_per_msa // num_cols)\n attns = 0\n scaling = self.align_scaling(x)\n for start in range(0, num_rows, max_rows):\n attn_weights = self.compute_attention_weights(\n x[start : start + max_rows],\n scaling,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask[:, start : start + max_rows]\n if self_attn_padding_mask is not None\n else None,\n )\n attns += attn_weights\n attn_probs = attns.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n\n outputs = []\n for start in range(0, num_rows, max_rows):\n output = self.compute_attention_update(x[start : start + max_rows], attn_probs)\n outputs.append(output)\n\n output = torch.cat(outputs, 0)\n return output, attn_probs\n\n def compute_attention_weights(\n self,\n x,\n scaling: float,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n q *= scaling\n if self_attn_padding_mask is not None:\n # Zero out any padded aligned positions - this is important since\n # we take a sum across the alignment axis.\n q *= 1 - self_attn_padding_mask.permute(1, 2, 0).unsqueeze(3).unsqueeze(4).to(q)\n \n if self.tranception_attention:\n # We do not do anything on the first self.num_heads_per_kernel_size heads (kernel =1)\n query_list=[q[:,:,:,:self.num_heads_per_kernel_size,:]]\n key_list=[k[:,:,:,:self.num_heads_per_kernel_size,:]]\n for kernel_idx in range(3):\n 
query_list.append(self.query_depthwiseconv[str(kernel_idx)](q[:,:,:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:]))\n key_list.append(self.key_depthwiseconv[str(kernel_idx)](k[:,:,:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:]))\n q=torch.cat(query_list, dim=1)\n k=torch.cat(key_list, dim=1)\n \n attn_weights = torch.einsum(f\"rinhd,rjnhd->{self.attn_shape}\", q, k)\n\n if self_attn_mask is not None:\n raise NotImplementedError\n # Mask Size: [B x R x C], Weights Size: [H x B x C x C]\n\n if self_attn_padding_mask is not None:\n attn_weights = attn_weights.masked_fill(\n self_attn_padding_mask[:, 0].unsqueeze(0).unsqueeze(2),\n -10000,\n )\n\n return attn_weights\n\n def compute_attention_update(\n self,\n x,\n attn_probs,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)\n \n if self.tranception_attention:\n value_list=[v[:,:,:,:self.num_heads_per_kernel_size,:]]\n for kernel_idx in range(3):\n value_list.append(self.value_depthwiseconv[str(kernel_idx)](v[:,:,:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:]))\n v=torch.cat(value_list, dim=1)\n\n context = torch.einsum(f\"{self.attn_shape},rjnhd->rinhd\", attn_probs, v)\n context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)\n output = self.out_proj(context)\n return output\n\n def forward(\n self,\n x,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n ):\n num_rows, num_cols, batch_size, embed_dim = x.size()\n if (num_rows * num_cols > self.max_tokens_per_msa) and not torch.is_grad_enabled():\n return self._batched_forward(x, self_attn_mask, self_attn_padding_mask)\n else:\n scaling = self.align_scaling(x)\n attn_weights = self.compute_attention_weights(\n x, scaling, self_attn_mask, self_attn_padding_mask\n )\n attn_probs = attn_weights.softmax(-1)\n attn_probs = self.dropout_module(attn_probs)\n output = self.compute_attention_update(x, attn_probs)\n return output, attn_probs"
}
] | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from .multihead_attention import MultiheadAttention # noqa
from .axial_attention import ColumnSelfAttention, RowSelfAttention
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
from torch.nn import LayerNorm as ESM1bLayerNorm | 8,506 | """Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = (self.weight * x) + self.bias
return x
try:
class ESM1bLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
class TransformerLayer(nn.Module):
"""Transformer layer block."""
def __init__(
self,
embed_dim,
ffn_embed_dim,
attention_heads,
add_bias_kv=True,
use_esm1b_layer_norm=False,
use_rotary_embeddings: bool = False,
):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self.use_rotary_embeddings = use_rotary_embeddings
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm
self.self_attn = MultiheadAttention(
self.embed_dim,
self.attention_heads,
add_bias_kv=add_bias_kv,
add_zero_attn=False,
use_rotary_embeddings=self.use_rotary_embeddings,
)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(
self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False
):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=True,
need_head_weights=need_head_weights,
attn_mask=self_attn_mask,
)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = gelu(self.fc1(x))
x = self.fc2(x)
x = residual + x
return x, attn
class AxialTransformerLayer(nn.Module):
"""Implements an Axial MSA Transformer block."""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_tokens_per_msa: int = 2**14,
deactivate_col_attention: bool = False,
tranception_attention: bool = False,
num_targets: int = 1,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_prob = dropout
self.deactivate_col_attention = deactivate_col_attention
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def symmetrize(x):
"Make layer symmetric in final two dimensions, used for contact prediction."
return x + x.transpose(-1, -2)
def apc(x):
"Perform average product correct, used for contact prediction."
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12) # in-place to reduce memory
normalized = x - avg
return normalized
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = (self.weight * x) + self.bias
return x
try:
class ESM1bLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
class TransformerLayer(nn.Module):
"""Transformer layer block."""
def __init__(
self,
embed_dim,
ffn_embed_dim,
attention_heads,
add_bias_kv=True,
use_esm1b_layer_norm=False,
use_rotary_embeddings: bool = False,
):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self.use_rotary_embeddings = use_rotary_embeddings
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm
self.self_attn = MultiheadAttention(
self.embed_dim,
self.attention_heads,
add_bias_kv=add_bias_kv,
add_zero_attn=False,
use_rotary_embeddings=self.use_rotary_embeddings,
)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(
self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False
):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=True,
need_head_weights=need_head_weights,
attn_mask=self_attn_mask,
)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = gelu(self.fc1(x))
x = self.fc2(x)
x = residual + x
return x, attn
class AxialTransformerLayer(nn.Module):
"""Implements an Axial MSA Transformer block."""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_tokens_per_msa: int = 2**14,
deactivate_col_attention: bool = False,
tranception_attention: bool = False,
num_targets: int = 1,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_prob = dropout
self.deactivate_col_attention = deactivate_col_attention
| row_self_attention = RowSelfAttention( | 2 | 2023-10-28 11:41:05+00:00 | 12k |
CVHub520/yolov5_obb | export.py | [
{
"identifier": "Conv",
"path": "models/common.py",
"snippet": "class Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def forward_fuse(self, x):\n return self.act(self.conv(x))"
},
{
"identifier": "attempt_load",
"path": "models/experimental.py",
"snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location=map_location) # load\n if fuse:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n else:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse\n\n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:\n m.inplace = inplace # pytorch 1.7.0 compatibility\n if type(m) is Detect:\n if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print(f'Ensemble created with {weights}\\n')\n for k in ['names']:\n setattr(model, k, getattr(model[-1], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n return model # return ensemble"
},
{
"identifier": "Detect",
"path": "models/yolo.py",
"snippet": "class Detect(nn.Module):\n stride = None # strides computed during build\n onnx_dynamic = False # ONNX export parameter\n export = False # export mode\n\n def __init__(self, nc=80, anchors=(), ch=(), inplace=False): # detection layer\n super().__init__()\n self.nc = nc # number of classes\n self.no = nc + 5 + 180 # number of outputs per anchor\n self.nl = len(anchors) # number of detection layers\n self.na = len(anchors[0]) // 2 # number of anchors\n self.grid = [torch.zeros(1)] * self.nl # init grid\n self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid\n self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)\n self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv\n self.inplace = inplace # use in-place ops (e.g. slice assignment)\n\n def forward(self, x):\n \"\"\"\n Args:\n x (list[P3_in,...]): torch.Size(b, c_i, h_i, w_i)\n\n Return:\n if train:\n x (list[P3_out,...]): torch.Size(b, self.na, h_i, w_i, self.no), self.na means the number of anchors scales\n else:\n inference (tensor): (b, n_all_anchors, self.no)\n x (list[P3_in,...]): torch.Size(b, c_i, h_i, w_i)\n \"\"\"\n z = [] # inference output\n for i in range(self.nl):\n x[i] = self.m[i](x[i]) # conv\n bs, _, ny, nx = x[i].shape # x[i](bs,self.no * self.na,20,20) to x[i](bs,self.na,20,20,self.no)\n x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()\n\n if not self.training: # inference\n if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:\n self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)\n\n y = x[i].sigmoid() # (tensor): (b, self.na, h, w, self.no)\n if self.inplace:\n y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy\n y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh\n else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953\n xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy\n wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh\n y = torch.cat((xy, wh, y[..., 4:]), -1) \n z.append(y.view(bs, -1, self.no)) # z (list[P3_pred]): Torch.Size(b, n_anchors, self.no)\n\n return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x)\n\n def _make_grid(self, nx=20, ny=20, i=0):\n d = self.anchors[i].device\n if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility\n yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij')\n else:\n yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)])\n grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float()\n anchor_grid = (self.anchors[i].clone() * self.stride[i]) \\\n .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float()\n return grid, anchor_grid"
},
{
"identifier": "SiLU",
"path": "utils/activations.py",
"snippet": "class SiLU(nn.Module): # export-friendly version of nn.SiLU()\n @staticmethod\n def forward(x):\n return x * torch.sigmoid(x)"
},
{
"identifier": "LoadImages",
"path": "utils/datasets.py",
"snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. ' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap, s\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files"
},
{
"identifier": "LOGGER",
"path": "utils/general.py",
"snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)"
},
{
"identifier": "check_dataset",
"path": "utils/general.py",
"snippet": "def check_dataset(data, autodownload=True):\n # Download and/or unzip dataset if not found locally\n # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip\n\n # Download (optional)\n extract_dir = ''\n if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip\n download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)\n data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))\n extract_dir, autodownload = data.parent, False\n\n # Read yaml (optional)\n if isinstance(data, (str, Path)):\n with open(data, errors='ignore') as f:\n data = yaml.safe_load(f) # dictionary\n\n # Parse yaml\n path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.'\n for k in 'train', 'val', 'test':\n if data.get(k): # prepend path\n data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]\n\n assert 'nc' in data, \"Dataset 'nc' key missing.\"\n if 'names' not in data:\n data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing\n train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))\n if val:\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n print('\\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])\n if s and autodownload: # download script\n root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n print(f'Downloading {s} to {f}...')\n torch.hub.download_url_to_file(s, f)\n Path(root).mkdir(parents=True, exist_ok=True) # create root\n ZipFile(f).extractall(path=root) # unzip\n Path(f).unlink() # remove zip\n r = None # success\n elif s.startswith('bash '): # bash script\n print(f'Running {s} ...')\n r = os.system(s)\n else: # python script\n r = exec(s, {'yaml': data}) # return None\n print(f\"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\\n\")\n else:\n raise Exception('Dataset not found.')\n\n return data # dictionary"
},
{
"identifier": "check_img_size",
"path": "utils/general.py",
"snippet": "def check_img_size(imgsz, s=32, floor=0):\n print(f\"#305 in utils/general.py - s={s}\")\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size"
},
{
"identifier": "check_requirements",
"path": "utils/general.py",
"snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))"
},
{
"identifier": "colorstr",
"path": "utils/general.py",
"snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']"
},
{
"identifier": "file_size",
"path": "utils/general.py",
"snippet": "def file_size(path):\n # Return file/dir size (MB)\n path = Path(path)\n if path.is_file():\n return path.stat().st_size / 1E6\n elif path.is_dir():\n return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6\n else:\n return 0.0"
},
{
"identifier": "print_args",
"path": "utils/general.py",
"snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))"
},
{
"identifier": "url2file",
"path": "utils/general.py",
"snippet": "def url2file(url):\n # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt\n url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth\n return file"
},
{
"identifier": "select_device",
"path": "utils/torch_utils.py",
"snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')"
}
] | import argparse
import json
import os
import subprocess
import sys
import time
import torch
import torch.nn as nn
import onnx
import onnxsim
import coremltools as ct
import openvino.inference_engine as ie
import tensorflow as tf
import re
import tensorflowjs as tfjs
import tensorrt as trt
from pathlib import Path
from torch.utils.mobile_optimizer import optimize_for_mobile
from models.common import Conv
from models.experimental import attempt_load
from models.yolo import Detect
from utils.activations import SiLU
from utils.datasets import LoadImages
from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args,
url2file)
from utils.torch_utils import select_device
from tensorflow import keras
from models.tf import TFDetect, TFModel
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from models.tf import representative_dataset_gen | 8,879 | converter.target_spec.supported_types = []
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
converter.experimental_new_quantizer = False
f = str(file).replace('.pt', '-int8.tflite')
tflite_model = converter.convert()
open(f, "wb").write(tflite_model)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
# YOLOv5 TensorFlow.js export
try:
check_requirements(('tensorflowjs',))
LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
f = str(file).replace('.pt', '_web_model') # js dir
f_pb = file.with_suffix('.pb') # *.pb path
f_json = f + '/model.json' # *.json path
cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \
f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}"
subprocess.run(cmd, shell=True)
json = open(f_json).read()
with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
subst = re.sub(
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}',
json)
j.write(subst)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
try:
check_requirements(('tensorrt',))
opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x
export_onnx(model, im, file, opset, train, False, simplify)
onnx = file.with_suffix('.onnx')
assert onnx.exists(), f'failed to export ONNX file: {onnx}'
LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
f = file.with_suffix('.engine') # TensorRT engine file
logger = trt.Logger(trt.Logger.INFO)
if verbose:
logger.min_severity = trt.Logger.Severity.VERBOSE
builder = trt.Builder(logger)
config = builder.create_builder_config()
config.max_workspace_size = workspace * 1 << 30
flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
network = builder.create_network(flag)
parser = trt.OnnxParser(network, logger)
if not parser.parse_from_file(str(onnx)):
raise RuntimeError(f'failed to load ONNX file: {onnx}')
inputs = [network.get_input(i) for i in range(network.num_inputs)]
outputs = [network.get_output(i) for i in range(network.num_outputs)]
LOGGER.info(f'{prefix} Network Description:')
for inp in inputs:
LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
for out in outputs:
LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
half &= builder.platform_has_fast_fp16
LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}')
if half:
config.set_flag(trt.BuilderFlag.FP16)
with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
t.write(engine.serialize())
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
@torch.no_grad()
def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
weights=ROOT / 'yolov5s.pt', # weights path
imgsz=(640, 640), # image (height, width)
batch_size=1, # batch size
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
include=('torchscript', 'onnx'), # include formats
half=False, # FP16 half-precision export
inplace=False, # set YOLOv5 Detect() inplace=True
train=False, # model.train() mode
optimize=False, # TorchScript: optimize for mobile
int8=False, # CoreML/TF INT8 quantization
dynamic=False, # ONNX/TF: dynamic axes
simplify=False, # ONNX: simplify model
opset=12, # ONNX: opset version
verbose=False, # TensorRT: verbose log
workspace=4, # TensorRT: workspace size (GB)
nms=False, # TF: add NMS to model
agnostic_nms=False, # TF: add agnostic NMS to model
topk_per_class=100, # TF.js NMS: topk per class to keep
topk_all=100, # TF.js NMS: topk for all classes to keep
iou_thres=0.45, # TF.js NMS: IoU threshold
conf_thres=0.25 # TF.js NMS: confidence threshold
):
t = time.time()
include = [x.lower() for x in include]
tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports
| # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
Format | Example | `--include ...` argument
--- | --- | ---
PyTorch | yolov5s.pt | -
TorchScript | yolov5s.torchscript | `torchscript`
ONNX | yolov5s.onnx | `onnx`
CoreML | yolov5s.mlmodel | `coreml`
OpenVINO | yolov5s_openvino_model/ | `openvino`
TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model`
TensorFlow GraphDef | yolov5s.pb | `pb`
TensorFlow Lite | yolov5s.tflite | `tflite`
TensorFlow.js | yolov5s_web_model/ | `tfjs`
TensorRT | yolov5s.engine | `engine`
Usage:
$ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs
Inference:
$ python path/to/detect.py --weights yolov5s.pt
yolov5s.torchscript
yolov5s.onnx
yolov5s.mlmodel (under development)
yolov5s_openvino_model (under development)
yolov5s_saved_model
yolov5s.pb
yolov5s.tflite
yolov5s.engine
TensorFlow.js:
$ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
$ npm install
$ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
$ npm start
"""
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
# YOLOv5 TorchScript model export
try:
LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
f = file.with_suffix('.torchscript')
ts = torch.jit.trace(model, im, strict=False)
d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
(optimize_for_mobile(ts) if optimize else ts).save(str(f), _extra_files=extra_files)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'{prefix} export failure: {e}')
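# Illustrative sketch: the saved TorchScript file can be reloaded without the YOLOv5
# source tree, and the metadata written to config.txt is recoverable via _extra_files
# (the file name shown is hypothetical):
#
#   extra = {'config.txt': ''}
#   ts_model = torch.jit.load('yolov5s.torchscript', _extra_files=extra)
#   meta = json.loads(extra['config.txt'])   # {'shape': ..., 'stride': ..., 'names': ...}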
def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
# YOLOv5 ONNX export
try:
check_requirements(('onnx',))
LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
f = file.with_suffix('.onnx')
torch.onnx.export(model, im, f, verbose=False, opset_version=opset,
training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
do_constant_folding=not train,
input_names=['images'],
output_names=['output'],
dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640)
'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
} if dynamic else None)
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print
# Simplify
if simplify:
try:
check_requirements(('onnx-simplifier',))
LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
model_onnx, check = onnxsim.simplify(
model_onnx,
dynamic_input_shape=dynamic,
input_shapes={'images': list(im.shape)} if dynamic else None)
assert check, 'assert check failed'
onnx.save(model_onnx, f)
except Exception as e:
LOGGER.info(f'{prefix} simplifier failure: {e}')
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
LOGGER.info(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'")
except Exception as e:
LOGGER.info(f'{prefix} export failure: {e}')
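# Illustrative sketch: a quick smoke test of the exported graph with onnxruntime
# (an extra dependency, not used by this script); the input name 'images' matches
# input_names in export_onnx above:
#
#   import numpy as np
#   import onnxruntime as ort
#   sess = ort.InferenceSession('yolov5s.onnx')
#   pred = sess.run(None, {'images': np.zeros((1, 3, 640, 640), dtype=np.float32)})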
def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
# YOLOv5 CoreML export
ct_model = None
try:
check_requirements(('coremltools',))
LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
f = file.with_suffix('.mlmodel')
model.train() # CoreML exports should be placed in model.train() mode
ts = torch.jit.trace(model, im, strict=False) # TorchScript model
ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
ct_model.save(f)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
return ct_model
def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
# YOLOv5 OpenVINO export
try:
check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
f = str(file).replace('.pt', '_openvino_model' + os.sep)
cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
subprocess.check_output(cmd, shell=True)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
def export_saved_model(model, im, file, dynamic,
tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')):
# YOLOv5 TensorFlow saved_model export
keras_model = None
try:
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
f = str(file).replace('.pt', '_saved_model')
batch_size, ch, *imgsz = list(im.shape) # BCHW
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
im = tf.zeros((batch_size, *imgsz, 3)) # BHWC order for TensorFlow
y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
keras_model = keras.Model(inputs=inputs, outputs=outputs)
keras_model.trainable = False
keras_model.summary()
keras_model.save(f, save_format='tf')
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
return keras_model
def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
# YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
try:
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
f = file.with_suffix('.pb')
m = tf.function(lambda x: keras_model(x)) # full model
m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
frozen_func = convert_variables_to_constants_v2(m)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):
# YOLOv5 TensorFlow Lite export
try:
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
batch_size, ch, *imgsz = list(im.shape) # BCHW
f = str(file).replace('.pt', '-fp16.tflite')
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
converter.target_spec.supported_types = [tf.float16]
converter.optimizations = [tf.lite.Optimize.DEFAULT]
if int8:
dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = []
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
converter.experimental_new_quantizer = False
f = str(file).replace('.pt', '-int8.tflite')
tflite_model = converter.convert()
open(f, "wb").write(tflite_model)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
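# Illustrative sketch: the generated .tflite file can be sanity-checked with the
# interpreter bundled in tensorflow (the file name shown is hypothetical):
#
#   interpreter = tf.lite.Interpreter(model_path='yolov5s-fp16.tflite')
#   interpreter.allocate_tensors()
#   print(interpreter.get_input_details()[0]['shape'])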
def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
# YOLOv5 TensorFlow.js export
try:
check_requirements(('tensorflowjs',))
LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
f = str(file).replace('.pt', '_web_model') # js dir
f_pb = file.with_suffix('.pb') # *.pb path
f_json = f + '/model.json' # *.json path
cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \
f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}"
subprocess.run(cmd, shell=True)
json = open(f_json).read()
with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
subst = re.sub(
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}',
json)
j.write(subst)
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
try:
check_requirements(('tensorrt',))
opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x
export_onnx(model, im, file, opset, train, False, simplify)
onnx = file.with_suffix('.onnx')
assert onnx.exists(), f'failed to export ONNX file: {onnx}'
LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
f = file.with_suffix('.engine') # TensorRT engine file
logger = trt.Logger(trt.Logger.INFO)
if verbose:
logger.min_severity = trt.Logger.Severity.VERBOSE
builder = trt.Builder(logger)
config = builder.create_builder_config()
config.max_workspace_size = workspace * 1 << 30
flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
network = builder.create_network(flag)
parser = trt.OnnxParser(network, logger)
if not parser.parse_from_file(str(onnx)):
raise RuntimeError(f'failed to load ONNX file: {onnx}')
inputs = [network.get_input(i) for i in range(network.num_inputs)]
outputs = [network.get_output(i) for i in range(network.num_outputs)]
LOGGER.info(f'{prefix} Network Description:')
for inp in inputs:
LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
for out in outputs:
LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
half &= builder.platform_has_fast_fp16
LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}')
if half:
config.set_flag(trt.BuilderFlag.FP16)
with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
t.write(engine.serialize())
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
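# Illustrative sketch: the serialized .engine file is later deserialized with a
# TensorRT runtime before inference (device buffers and an execution context are
# still required and are omitted here; the file name is hypothetical):
#
#   runtime = trt.Runtime(trt.Logger(trt.Logger.INFO))
#   with open('yolov5s.engine', 'rb') as fd:
#       engine = runtime.deserialize_cuda_engine(fd.read())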
@torch.no_grad()
def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
weights=ROOT / 'yolov5s.pt', # weights path
imgsz=(640, 640), # image (height, width)
batch_size=1, # batch size
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
include=('torchscript', 'onnx'), # include formats
half=False, # FP16 half-precision export
inplace=False, # set YOLOv5 Detect() inplace=True
train=False, # model.train() mode
optimize=False, # TorchScript: optimize for mobile
int8=False, # CoreML/TF INT8 quantization
dynamic=False, # ONNX/TF: dynamic axes
simplify=False, # ONNX: simplify model
opset=12, # ONNX: opset version
verbose=False, # TensorRT: verbose log
workspace=4, # TensorRT: workspace size (GB)
nms=False, # TF: add NMS to model
agnostic_nms=False, # TF: add agnostic NMS to model
topk_per_class=100, # TF.js NMS: topk per class to keep
topk_all=100, # TF.js NMS: topk for all classes to keep
iou_thres=0.45, # TF.js NMS: IoU threshold
conf_thres=0.25 # TF.js NMS: confidence threshold
):
t = time.time()
include = [x.lower() for x in include]
tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports | file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) | 12 | 2023-10-31 06:06:41+00:00 | 12k |
Kiteretsu77/VCISR-official | test_code/utils.py | [
{
"identifier": "RRDBNet",
"path": "architecture/rrdb.py",
"snippet": "class RRDBNet(nn.Module):\n \"\"\"Networks consisting of Residual in Residual Dense Block, which is used\n in ESRGAN.\n\n ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.\n\n We extend ESRGAN for scale x2 and scale x1.\n Note: This is one option for scale 1, scale 2 in RRDBNet.\n We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size\n and enlarge the channel size before feeding inputs into the main ESRGAN architecture.\n\n Args:\n num_in_ch (int): Channel number of inputs.\n num_out_ch (int): Channel number of outputs.\n num_feat (int): Channel number of intermediate features.\n Default: 64\n num_block (int): Block number in the trunk network. Defaults: 23\n num_grow_ch (int): Channels for each growth. Default: 32.\n \"\"\"\n\n def __init__(self, num_in_ch, num_out_ch, scale, num_feat=64, num_block=23, num_grow_ch=32):\n # shllow block (差不多砍半)\n num_block = 23\n\n super(RRDBNet, self).__init__()\n self.scale = scale\n if scale == 2:\n num_in_ch = num_in_ch * 4\n elif scale == 1:\n num_in_ch = num_in_ch * 16\n self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)\n self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)\n self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n # upsample\n self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)\n\n self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n def forward(self, x):\n if self.scale == 2:\n feat = pixel_unshuffle(x, scale=2)\n elif self.scale == 1:\n feat = pixel_unshuffle(x, scale=4)\n else:\n feat = x\n feat = self.conv_first(feat)\n body_feat = self.conv_body(self.body(feat))\n feat = feat + body_feat\n # upsample\n feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))\n feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))\n out = self.conv_last(self.lrelu(self.conv_hr(feat)))\n return out"
},
{
"identifier": "GRL",
"path": "architecture/grl.py",
"snippet": "class GRL(nn.Module):\n r\"\"\"Image restoration transformer with global, non-local, and local connections\n Args:\n img_size (int | list[int]): Input image size. Default 64\n in_channels (int): Number of input image channels. Default: 3\n out_channels (int): Number of output image channels. Default: None\n embed_dim (int): Patch embedding dimension. Default: 96\n upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction\n img_range (float): Image range. 1. or 255.\n upsampler (str): The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None\n depths (list[int]): Depth of each Swin Transformer layer.\n num_heads_window (list[int]): Number of window attention heads in different layers.\n num_heads_stripe (list[int]): Number of stripe attention heads in different layers.\n window_size (int): Window size. Default: 8.\n stripe_size (list[int]): Stripe size. Default: [8, 8]\n stripe_groups (list[int]): Number of stripe groups. Default: [None, None].\n stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.\n anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.\n anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.\n anchor_window_down_factor (int): The downscale factor used to get the anchors.\n out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.\n local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].\n pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n conv_type (str): The convolutional block before residual connection. Default: 1conv. Choices: 1conv, 3conv, 1conv1x1, linear\n init_method: initialization method of the weight parameters used to train large scale models.\n Choices: n, normal -- Swin V1 init method.\n l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.\n r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1\n w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1\n t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale\n fairscale_checkpoint (bool): Whether to use fairscale checkpoint.\n offload_to_cpu (bool): used by fairscale_checkpoint\n euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. 
An ablation study.\n\n \"\"\"\n\n def __init__(\n self,\n img_size=64,\n in_channels=3,\n out_channels=None,\n embed_dim=96,\n upscale=2,\n img_range=1.0,\n upsampler=\"\",\n depths=[6, 6, 6, 6, 6, 6],\n num_heads_window=[3, 3, 3, 3, 3, 3],\n num_heads_stripe=[3, 3, 3, 3, 3, 3],\n window_size=8,\n stripe_size=[8, 8], # used for stripe window attention\n stripe_groups=[None, None],\n stripe_shift=False,\n mlp_ratio=4.0,\n qkv_bias=True,\n qkv_proj_type=\"linear\",\n anchor_proj_type=\"avgpool\",\n anchor_one_stage=True,\n anchor_window_down_factor=1,\n out_proj_type=\"linear\",\n local_connection=False,\n drop_rate=0.0,\n attn_drop_rate=0.0,\n drop_path_rate=0.1,\n norm_layer=nn.LayerNorm,\n pretrained_window_size=[0, 0],\n pretrained_stripe_size=[0, 0],\n conv_type=\"1conv\",\n init_method=\"n\", # initialization method of the weight parameters used to train large scale models.\n fairscale_checkpoint=False, # fairscale activation checkpointing\n offload_to_cpu=False,\n euclidean_dist=False,\n **kwargs,\n ):\n super(GRL, self).__init__()\n # Process the input arguments\n out_channels = out_channels or in_channels\n self.in_channels = in_channels\n self.out_channels = out_channels\n num_out_feats = 64\n self.embed_dim = embed_dim\n self.upscale = upscale\n self.upsampler = upsampler\n self.img_range = img_range\n if in_channels == 3:\n rgb_mean = (0.4488, 0.4371, 0.4040)\n self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)\n else:\n self.mean = torch.zeros(1, 1, 1, 1)\n\n max_stripe_size = max([0 if s is None else s for s in stripe_size])\n max_stripe_groups = max([0 if s is None else s for s in stripe_groups])\n max_stripe_groups *= anchor_window_down_factor\n self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)\n # if max_stripe_size >= window_size:\n # self.pad_size *= anchor_window_down_factor\n # if stripe_groups[0] is None and stripe_groups[1] is None:\n # self.pad_size = max(stripe_size)\n # else:\n # self.pad_size = window_size\n self.input_resolution = to_2tuple(img_size)\n self.window_size = to_2tuple(window_size)\n self.shift_size = [w // 2 for w in self.window_size]\n self.stripe_size = stripe_size\n self.stripe_groups = stripe_groups\n self.pretrained_window_size = pretrained_window_size\n self.pretrained_stripe_size = pretrained_stripe_size\n self.anchor_window_down_factor = anchor_window_down_factor\n\n # Head of the network. 
First convolution.\n self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)\n\n # Body of the network\n self.norm_start = norm_layer(embed_dim)\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]\n # stochastic depth decay rule\n args = OmegaConf.create(\n {\n \"out_proj_type\": out_proj_type,\n \"local_connection\": local_connection,\n \"euclidean_dist\": euclidean_dist,\n }\n )\n for k, v in self.set_table_index_mask(self.input_resolution).items():\n self.register_buffer(k, v)\n\n self.layers = nn.ModuleList()\n for i in range(len(depths)):\n layer = TransformerStage(\n dim=embed_dim,\n input_resolution=self.input_resolution,\n depth=depths[i],\n num_heads_window=num_heads_window[i],\n num_heads_stripe=num_heads_stripe[i],\n window_size=self.window_size,\n stripe_size=stripe_size,\n stripe_groups=stripe_groups,\n stripe_shift=stripe_shift,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n qkv_proj_type=qkv_proj_type,\n anchor_proj_type=anchor_proj_type,\n anchor_one_stage=anchor_one_stage,\n anchor_window_down_factor=anchor_window_down_factor,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=dpr[\n sum(depths[:i]) : sum(depths[: i + 1])\n ], # no impact on SR results\n norm_layer=norm_layer,\n pretrained_window_size=pretrained_window_size,\n pretrained_stripe_size=pretrained_stripe_size,\n conv_type=conv_type,\n init_method=init_method,\n fairscale_checkpoint=fairscale_checkpoint,\n offload_to_cpu=offload_to_cpu,\n args=args,\n )\n self.layers.append(layer)\n self.norm_end = norm_layer(embed_dim)\n\n # Tail of the network\n self.conv_after_body = build_last_conv(conv_type, embed_dim)\n\n #####################################################################################################\n ################################ 3, high quality image reconstruction ################################\n if self.upsampler == \"pixelshuffle\":\n # for classical SR\n self.conv_before_upsample = nn.Sequential(\n nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)\n )\n self.upsample = Upsample(upscale, num_out_feats)\n self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)\n elif self.upsampler == \"pixelshuffledirect\":\n # for lightweight SR (to save parameters)\n self.upsample = UpsampleOneStep(\n upscale,\n embed_dim,\n out_channels,\n )\n elif self.upsampler == \"nearest+conv\":\n # for real-world SR (less artifacts)\n assert self.upscale == 4, \"only support x4 now.\"\n self.conv_before_upsample = nn.Sequential(\n nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)\n )\n self.conv_up1 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)\n self.conv_up2 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)\n self.conv_hr = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)\n self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)\n self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n else:\n # for image denoising and JPEG compression artifact reduction\n self.conv_last = nn.Conv2d(embed_dim, out_channels, 3, 1, 1)\n\n self.apply(self._init_weights)\n if init_method in [\"l\", \"w\"] or init_method.find(\"t\") >= 0:\n for layer in self.layers:\n layer._init_weights()\n\n def set_table_index_mask(self, x_size):\n \"\"\"\n Two used cases:\n 1) At initialization: set the shared buffers.\n 2) During forward pass: get the new buffers if the resolution of the input changes\n \"\"\"\n # ss - stripe_size, sss - stripe_shift_size\n ss, sss = 
_get_stripe_info(self.stripe_size, self.stripe_groups, True, x_size)\n df = self.anchor_window_down_factor\n\n table_w = get_relative_coords_table_all(\n self.window_size, self.pretrained_window_size\n )\n table_sh = get_relative_coords_table_all(ss, self.pretrained_stripe_size, df)\n table_sv = get_relative_coords_table_all(\n ss[::-1], self.pretrained_stripe_size, df\n )\n\n index_w = get_relative_position_index_simple(self.window_size)\n index_sh_a2w = get_relative_position_index_simple(ss, df, False)\n index_sh_w2a = get_relative_position_index_simple(ss, df, True)\n index_sv_a2w = get_relative_position_index_simple(ss[::-1], df, False)\n index_sv_w2a = get_relative_position_index_simple(ss[::-1], df, True)\n\n mask_w = calculate_mask(x_size, self.window_size, self.shift_size)\n mask_sh_a2w = calculate_mask_all(x_size, ss, sss, df, False)\n mask_sh_w2a = calculate_mask_all(x_size, ss, sss, df, True)\n mask_sv_a2w = calculate_mask_all(x_size, ss[::-1], sss[::-1], df, False)\n mask_sv_w2a = calculate_mask_all(x_size, ss[::-1], sss[::-1], df, True)\n return {\n \"table_w\": table_w,\n \"table_sh\": table_sh,\n \"table_sv\": table_sv,\n \"index_w\": index_w,\n \"index_sh_a2w\": index_sh_a2w,\n \"index_sh_w2a\": index_sh_w2a,\n \"index_sv_a2w\": index_sv_a2w,\n \"index_sv_w2a\": index_sv_w2a,\n \"mask_w\": mask_w,\n \"mask_sh_a2w\": mask_sh_a2w,\n \"mask_sh_w2a\": mask_sh_w2a,\n \"mask_sv_a2w\": mask_sv_a2w,\n \"mask_sv_w2a\": mask_sv_w2a,\n }\n\n def get_table_index_mask(self, device=None, input_resolution=None):\n # Used during forward pass\n if input_resolution == self.input_resolution:\n return {\n \"table_w\": self.table_w,\n \"table_sh\": self.table_sh,\n \"table_sv\": self.table_sv,\n \"index_w\": self.index_w,\n \"index_sh_a2w\": self.index_sh_a2w,\n \"index_sh_w2a\": self.index_sh_w2a,\n \"index_sv_a2w\": self.index_sv_a2w,\n \"index_sv_w2a\": self.index_sv_w2a,\n \"mask_w\": self.mask_w,\n \"mask_sh_a2w\": self.mask_sh_a2w,\n \"mask_sh_w2a\": self.mask_sh_w2a,\n \"mask_sv_a2w\": self.mask_sv_a2w,\n \"mask_sv_w2a\": self.mask_sv_w2a,\n }\n else:\n table_index_mask = self.set_table_index_mask(input_resolution)\n for k, v in table_index_mask.items():\n table_index_mask[k] = v.to(device)\n return table_index_mask\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n # Only used to initialize linear layers\n # weight_shape = m.weight.shape\n # if weight_shape[0] > 256 and weight_shape[1] > 256:\n # std = 0.004\n # else:\n # std = 0.02\n # print(f\"Standard deviation during initialization {std}.\")\n trunc_normal_(m.weight, std=0.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {\"absolute_pos_embed\"}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {\"relative_position_bias_table\"}\n\n def check_image_size(self, x):\n _, _, h, w = x.size()\n mod_pad_h = (self.pad_size - h % self.pad_size) % self.pad_size\n mod_pad_w = (self.pad_size - w % self.pad_size) % self.pad_size\n # print(\"padding size\", h, w, self.pad_size, mod_pad_h, mod_pad_w)\n\n try:\n x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), \"reflect\")\n except BaseException:\n x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), \"constant\")\n return x\n\n def forward_features(self, x):\n x_size = (x.shape[2], x.shape[3])\n x = bchw_to_blc(x)\n x = self.norm_start(x)\n x = self.pos_drop(x)\n\n table_index_mask 
= self.get_table_index_mask(x.device, x_size)\n for layer in self.layers:\n x = layer(x, x_size, table_index_mask)\n\n x = self.norm_end(x) # B L C\n x = blc_to_bchw(x, x_size)\n\n return x\n\n def forward(self, x):\n H, W = x.shape[2:]\n x = self.check_image_size(x)\n\n self.mean = self.mean.type_as(x)\n x = (x - self.mean) * self.img_range\n\n if self.upsampler == \"pixelshuffle\":\n # for classical SR\n x = self.conv_first(x)\n x = self.conv_after_body(self.forward_features(x)) + x\n x = self.conv_before_upsample(x)\n x = self.conv_last(self.upsample(x))\n elif self.upsampler == \"pixelshuffledirect\":\n # for lightweight SR\n x = self.conv_first(x)\n x = self.conv_after_body(self.forward_features(x)) + x\n x = self.upsample(x)\n elif self.upsampler == \"nearest+conv\":\n # for real-world SR\n x = self.conv_first(x)\n x = self.conv_after_body(self.forward_features(x)) + x\n x = self.conv_before_upsample(x)\n x = self.lrelu(\n self.conv_up1(\n torch.nn.functional.interpolate(x, scale_factor=2, mode=\"nearest\")\n )\n )\n x = self.lrelu(\n self.conv_up2(\n torch.nn.functional.interpolate(x, scale_factor=2, mode=\"nearest\")\n )\n )\n x = self.conv_last(self.lrelu(self.conv_hr(x)))\n else:\n # for image denoising and JPEG compression artifact reduction\n x_first = self.conv_first(x)\n res = self.conv_after_body(self.forward_features(x_first)) + x_first\n if self.in_channels == self.out_channels:\n x = x + self.conv_last(res)\n else:\n x = self.conv_last(res)\n\n x = x / self.img_range + self.mean\n\n return x[:, :, : H * self.upscale, : W * self.upscale]\n\n def flops(self):\n pass\n\n def convert_checkpoint(self, state_dict):\n for k in list(state_dict.keys()):\n if (\n k.find(\"relative_coords_table\") >= 0\n or k.find(\"relative_position_index\") >= 0\n or k.find(\"attn_mask\") >= 0\n or k.find(\"model.table_\") >= 0\n or k.find(\"model.index_\") >= 0\n or k.find(\"model.mask_\") >= 0\n # or k.find(\".upsample.\") >= 0\n ):\n state_dict.pop(k)\n print(k)\n return state_dict"
},
{
"identifier": "SwinIR",
"path": "architecture/swinir.py",
"snippet": "class SwinIR(nn.Module):\n r\"\"\" SwinIR\n A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.\n\n Args:\n img_size (int | tuple(int)): Input image size. Default 64\n patch_size (int | tuple(int)): Patch size. Default: 1\n in_chans (int): Number of input image channels. Default: 3\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction\n img_range: Image range. 1. or 255.\n upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None\n resi_connection: The convolutional block before residual connection. '1conv'/'3conv'\n \"\"\"\n\n def __init__(self, img_size=64, patch_size=1, in_chans=3,\n embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',\n **kwargs):\n super(SwinIR, self).__init__()\n num_in_ch = in_chans\n num_out_ch = in_chans\n num_feat = 64\n self.img_range = img_range\n if in_chans == 3:\n rgb_mean = (0.4488, 0.4371, 0.4040)\n self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)\n else:\n self.mean = torch.zeros(1, 1, 1, 1)\n self.upscale = upscale\n self.upsampler = upsampler\n self.window_size = window_size\n\n #####################################################################################################\n ################################### 1, shallow feature extraction ###################################\n self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)\n\n #####################################################################################################\n ################################### 2, deep feature extraction ######################################\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = embed_dim\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # merge non-overlapping patches into image\n self.patch_unembed = PatchUnEmbed(\n 
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build Residual Swin Transformer blocks (RSTB)\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = RSTB(dim=embed_dim,\n input_resolution=(patches_resolution[0],\n patches_resolution[1]),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results\n norm_layer=norm_layer,\n downsample=None,\n use_checkpoint=use_checkpoint,\n img_size=img_size,\n patch_size=patch_size,\n resi_connection=resi_connection\n\n )\n self.layers.append(layer)\n self.norm = norm_layer(self.num_features)\n\n # build the last conv layer in deep feature extraction\n if resi_connection == '1conv':\n self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)\n elif resi_connection == '3conv':\n # to save parameters and memory\n self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))\n\n #####################################################################################################\n ################################ 3, high quality image reconstruction ################################\n if self.upsampler == 'pixelshuffle':\n # for classical SR\n self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),\n nn.LeakyReLU(inplace=True))\n self.upsample = Upsample(upscale, num_feat)\n self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)\n elif self.upsampler == 'pixelshuffledirect':\n # for lightweight SR (to save parameters)\n self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,\n (patches_resolution[0], patches_resolution[1]))\n elif self.upsampler == 'nearest+conv':\n # for real-world SR (less artifacts)\n self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),\n nn.LeakyReLU(inplace=True))\n self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n if self.upscale == 4:\n self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)\n self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)\n self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n else:\n # for image denoising and JPEG compression artifact reduction\n self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def 
no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def check_image_size(self, x):\n _, _, h, w = x.size()\n mod_pad_h = (self.window_size - h % self.window_size) % self.window_size\n mod_pad_w = (self.window_size - w % self.window_size) % self.window_size\n x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')\n return x\n\n def forward_features(self, x):\n x_size = (x.shape[2], x.shape[3])\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x, x_size)\n\n x = self.norm(x) # B L C\n x = self.patch_unembed(x, x_size)\n\n return x\n\n def forward(self, x):\n H, W = x.shape[2:]\n x = self.check_image_size(x)\n \n self.mean = self.mean.type_as(x)\n x = (x - self.mean) * self.img_range\n\n if self.upsampler == 'pixelshuffle':\n # for classical SR\n x = self.conv_first(x)\n x = self.conv_after_body(self.forward_features(x)) + x\n x = self.conv_before_upsample(x)\n x = self.conv_last(self.upsample(x))\n elif self.upsampler == 'pixelshuffledirect':\n # for lightweight SR\n x = self.conv_first(x)\n x = self.conv_after_body(self.forward_features(x)) + x\n x = self.upsample(x)\n elif self.upsampler == 'nearest+conv':\n # for real-world SR\n x = self.conv_first(x)\n x = self.conv_after_body(self.forward_features(x)) + x\n x = self.conv_before_upsample(x)\n x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))\n if self.upscale == 4:\n x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))\n x = self.conv_last(self.lrelu(self.conv_hr(x)))\n else:\n # for image denoising and JPEG compression artifact reduction\n x_first = self.conv_first(x)\n res = self.conv_after_body(self.forward_features(x_first)) + x_first\n x = x + self.conv_last(res)\n\n x = x / self.img_range + self.mean\n\n return x[:, :, :H*self.upscale, :W*self.upscale]\n\n def flops(self):\n flops = 0\n H, W = self.patches_resolution\n flops += H * W * 3 * self.embed_dim * 9\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += H * W * 3 * self.embed_dim * self.embed_dim\n flops += self.upsample.flops()\n return flops"
}
] | import os, sys
import torch
from architecture.rrdb import RRDBNet
from architecture.grl import GRL
from architecture.swinir import SwinIR | 8,555 |
# Import files from the same folder
root_path = os.path.abspath('.')
sys.path.append(root_path)
def load_grl(generator_weight_PATH, print_options=True):
''' A simpler API to load GRL model
Args:
generator_weight_PATH (str): The path to the weight
        print_options (bool): whether to print options to show what kind of settings are used
Returns:
generator (torch): the generator instance of the model
'''
# Load the checkpoint
checkpoint_g = torch.load(generator_weight_PATH)
# Find the generator weight
if 'model_state_dict' in checkpoint_g:
weight = checkpoint_g['model_state_dict']
# GRL Small
|
# Import files from the same folder
root_path = os.path.abspath('.')
sys.path.append(root_path)
def load_grl(generator_weight_PATH, print_options=True):
''' A simpler API to load GRL model
Args:
generator_weight_PATH (str): The path to the weight
        print_options (bool): whether to print options to show what kind of settings are used
Returns:
generator (torch): the generator instance of the model
'''
# Load the checkpoint
checkpoint_g = torch.load(generator_weight_PATH)
# Find the generator weight
if 'model_state_dict' in checkpoint_g:
weight = checkpoint_g['model_state_dict']
# GRL Small | generator = GRL( | 1 | 2023-10-29 04:33:38+00:00 | 12k |
DataCanvasIO/LMS | lms/runtime/evaluation/benchmark/eval.py | [
{
"identifier": "ARCDataset",
"path": "lms/runtime/evaluation/benchmark/eval_dataset.py",
"snippet": "class ARCDataset():\n @staticmethod\n def load(path: str = basepath + \"/data/ARC/ARC-c/ARC-Challenge-Dev.jsonl\"):\n with open(path, 'r', errors='ignore') as in_f:\n rows = []\n for i, line in enumerate(in_f):\n sample = json.loads(line.strip())\n answer = sample['answerKey']\n sample = sample['question']\n question = sample['stem']\n choices = sample['choices']\n if len(choices) != 4:\n continue\n textA = choices[0]['text']\n textB = choices[1]['text']\n textC = choices[2]['text']\n textD = choices[3]['text']\n rows.append({\n 'question': f\"Question: {question}\\nA. {textA}\\nB. {textB}\\nC. {textC}\\nD. {textD}\\nAnswer:\",\n 'answer': answer,\n 'textA': textA,\n 'textB': textB,\n 'textC': textC,\n 'textD': textD\n })\n dataset = Dataset.from_dict({\n 'question': [row['question'] for row in rows],\n 'answer': [row['answer'] for row in rows],\n 'textA': [row['textA'] for row in rows],\n 'textB': [row['textB'] for row in rows],\n 'textC': [row['textC'] for row in rows],\n 'textD': [row['textD'] for row in rows]\n })\n return dataset"
},
{
"identifier": "MMLUDataset",
"path": "lms/runtime/evaluation/benchmark/eval_dataset.py",
"snippet": "class MMLUDataset():\n @staticmethod\n def load(path: str = basepath + \"/data/mmlu/\"):\n dataset = DatasetDict()\n name_list = [\n \"college_biology\",\n \"college_chemistry\",\n \"college_computer_science\",\n \"college_mathematics\",\n \"college_physics\",\n \"electrical_engineering\",\n \"astronomy\",\n \"anatomy\",\n \"abstract_algebra\",\n \"machine_learning\",\n \"clinical_knowledge\",\n \"global_facts\",\n \"management\",\n \"nutrition\",\n \"marketing\",\n \"professional_accounting\",\n \"high_school_geography\",\n \"international_law\",\n \"moral_scenarios\",\n \"computer_security\",\n \"high_school_microeconomics\",\n \"professional_law\",\n \"medical_genetics\",\n \"professional_psychology\",\n \"jurisprudence\",\n \"world_religions\",\n \"philosophy\",\n \"virology\",\n \"high_school_chemistry\",\n \"public_relations\",\n \"high_school_macroeconomics\",\n \"human_sexuality\",\n \"elementary_mathematics\",\n \"high_school_physics\",\n \"high_school_computer_science\",\n \"high_school_european_history\",\n \"business_ethics\",\n \"moral_disputes\",\n \"high_school_statistics\",\n \"miscellaneous\",\n \"formal_logic\",\n \"high_school_government_and_politics\",\n \"prehistory\",\n \"security_studies\",\n \"high_school_biology\",\n \"logical_fallacies\",\n \"high_school_world_history\",\n \"professional_medicine\",\n \"high_school_mathematics\",\n \"college_medicine\",\n \"high_school_us_history\",\n \"sociology\",\n \"econometrics\",\n \"high_school_psychology\",\n \"human_aging\",\n \"us_foreign_policy\",\n \"conceptual_physics\",\n ]\n\n for split in ['dev', 'test']:\n raw_data = []\n for name in name_list:\n _hint = f'There is a single choice question about {name.replace(\"_\", \" \")}. Answer the question by replying A, B, C or D.'\n filename = osp.join(path, split, f'{name}_{split}.csv')\n with open(filename, encoding='utf-8') as f:\n reader = csv.reader(f)\n for row in reader:\n assert len(row) == 6\n input = row[0]\n A = row[1]\n B = row[2]\n C = row[3]\n D = row[4]\n raw_data.append({\n 'question': f\"{_hint}\\nQuestion: {input}\\nA. {A}\\nB. {B}\\nC. {C}\\nD. {D}\\nAnswer: \",\n 'A': row[1],\n 'B': row[2],\n 'C': row[3],\n 'D': row[4],\n 'answer': row[5],\n })\n dataset[split] = Dataset.from_list(raw_data)\n dataset = dataset[\"test\"]\n return dataset"
},
{
"identifier": "CMMLUDataset",
"path": "lms/runtime/evaluation/benchmark/eval_dataset.py",
"snippet": "class CMMLUDataset():\n @staticmethod\n def load(path: str = basepath + \"/data/cmmlu/\"):\n dataset = DatasetDict()\n cmmlu_subject_mapping = {\n 'agronomy': '农学',\n 'anatomy': '解剖学',\n 'ancient_chinese': '古汉语',\n 'arts': '艺术学',\n 'astronomy': '天文学',\n 'business_ethics': '商业伦理',\n 'chinese_civil_service_exam': '中国公务员考试',\n 'chinese_driving_rule': '中国驾驶规则',\n 'chinese_food_culture': '中国饮食文化',\n 'chinese_foreign_policy': '中国外交政策',\n 'chinese_history': '中国历史',\n 'chinese_literature': '中国文学',\n 'chinese_teacher_qualification': '中国教师资格',\n 'clinical_knowledge': '临床知识',\n 'college_actuarial_science': '大学精算学',\n 'college_education': '大学教育学',\n 'college_engineering_hydrology': '大学工程水文学',\n 'college_law': '大学法律',\n 'college_mathematics': '大学数学',\n 'college_medical_statistics': '大学医学统计',\n 'college_medicine': '大学医学',\n 'computer_science': '计算机科学',\n 'computer_security': '计算机安全',\n 'conceptual_physics': '概念物理学',\n 'construction_project_management': '建设工程管理',\n 'economics': '经济学',\n 'education': '教育学',\n 'electrical_engineering': '电气工程',\n 'elementary_chinese': '小学语文',\n 'elementary_commonsense': '小学常识',\n 'elementary_information_and_technology': '小学信息技术',\n 'elementary_mathematics': '初等数学',\n 'ethnology': '民族学',\n 'food_science': '食品科学',\n 'genetics': '遗传学',\n 'global_facts': '全球事实',\n 'high_school_biology': '高中生物',\n 'high_school_chemistry': '高中化学',\n 'high_school_geography': '高中地理',\n 'high_school_mathematics': '高中数学',\n 'high_school_physics': '高中物理学',\n 'high_school_politics': '高中政治',\n 'human_sexuality': '人类性行为',\n 'international_law': '国际法学',\n 'journalism': '新闻学',\n 'jurisprudence': '法理学',\n 'legal_and_moral_basis': '法律与道德基础',\n 'logical': '逻辑学',\n 'machine_learning': '机器学习',\n 'management': '管理学',\n 'marketing': '市场营销',\n 'marxist_theory': '马克思主义理论',\n 'modern_chinese': '现代汉语',\n 'nutrition': '营养学',\n 'philosophy': '哲学',\n 'professional_accounting': '专业会计',\n 'professional_law': '专业法学',\n 'professional_medicine': '专业医学',\n 'professional_psychology': '专业心理学',\n 'public_relations': '公共关系',\n 'security_study': '安全研究',\n 'sociology': '社会学',\n 'sports_science': '体育学',\n 'traditional_chinese_medicine': '中医中药',\n 'virology': '病毒学',\n 'world_history': '世界历史',\n 'world_religions': '世界宗教'\n }\n for split in ['dev', 'test']:\n raw_data = []\n for name in cmmlu_subject_mapping:\n filename = osp.join(path, split, f'{name}.csv')\n with open(filename, encoding='utf-8') as f:\n reader = csv.reader(f)\n _ = next(reader) # skip the header\n for row in reader:\n assert len(row) == 7\n question = row[1]\n A = row[2]\n B = row[3]\n C = row[4]\n D = row[5]\n raw_data.append({\n 'question': f\"以下是关于{cmmlu_subject_mapping[name]}的单项选择题,请直接给出正确答案的选项。\\n题目:{question}\\nA. {A}\\nB. {B}\\nC. {C}\\nD. {D} 答案是:\",\n 'A': row[2],\n 'B': row[3],\n 'C': row[4],\n 'D': row[5],\n 'answer': row[6],\n })\n dataset[split] = Dataset.from_list(raw_data)\n dataset = dataset[\"test\"]\n return dataset"
},
{
"identifier": "CEvalDataset",
"path": "lms/runtime/evaluation/benchmark/eval_dataset.py",
"snippet": "class CEvalDataset():\n @staticmethod\n def load(path: str = basepath + \"/data/ceval/formal_ceval\"):\n ceval_subject_mapping = {\n \"computer_network\":\n [\"Computer Network\", \"\\u8ba1\\u7b97\\u673a\\u7f51\\u7edc\", \"STEM\"],\n \"operating_system\":\n [\"Operating System\", \"\\u64cd\\u4f5c\\u7cfb\\u7edf\", \"STEM\"],\n \"computer_architecture\":\n [\"Computer Architecture\", \"\\u8ba1\\u7b97\\u673a\\u7ec4\\u6210\", \"STEM\"],\n \"college_programming\":\n [\"College Programming\", \"\\u5927\\u5b66\\u7f16\\u7a0b\", \"STEM\"],\n \"college_physics\": [\"College Physics\", \"\\u5927\\u5b66\\u7269\\u7406\", \"STEM\"],\n \"college_chemistry\":\n [\"College Chemistry\", \"\\u5927\\u5b66\\u5316\\u5b66\", \"STEM\"],\n \"advanced_mathematics\":\n [\"Advanced Mathematics\", \"\\u9ad8\\u7b49\\u6570\\u5b66\", \"STEM\"],\n \"probability_and_statistics\":\n [\"Probability and Statistics\", \"\\u6982\\u7387\\u7edf\\u8ba1\", \"STEM\"],\n \"discrete_mathematics\":\n [\"Discrete Mathematics\", \"\\u79bb\\u6563\\u6570\\u5b66\", \"STEM\"],\n \"electrical_engineer\": [\n \"Electrical Engineer\", \"\\u6ce8\\u518c\\u7535\\u6c14\\u5de5\\u7a0b\\u5e08\",\n \"STEM\"\n ],\n \"metrology_engineer\":\n [\"Metrology Engineer\", \"\\u6ce8\\u518c\\u8ba1\\u91cf\\u5e08\", \"STEM\"],\n \"high_school_mathematics\":\n [\"High School Mathematics\", \"\\u9ad8\\u4e2d\\u6570\\u5b66\", \"STEM\"],\n \"high_school_physics\":\n [\"High School Physics\", \"\\u9ad8\\u4e2d\\u7269\\u7406\", \"STEM\"],\n \"high_school_chemistry\":\n [\"High School Chemistry\", \"\\u9ad8\\u4e2d\\u5316\\u5b66\", \"STEM\"],\n \"high_school_biology\": [\n \"High School Biology\", \"\\u9ad8\\u4e2d\\u751f\\u7269\", \"STEM\"\n ],\n \"middle_school_mathematics\": [\n \"Middle School Mathematics\", \"\\u521d\\u4e2d\\u6570\\u5b66\", \"STEM\"\n ],\n \"middle_school_biology\": [\n \"Middle School Biology\", \"\\u521d\\u4e2d\\u751f\\u7269\", \"STEM\"\n ],\n \"middle_school_physics\": [\n \"Middle School Physics\", \"\\u521d\\u4e2d\\u7269\\u7406\", \"STEM\"\n ],\n \"middle_school_chemistry\": [\n \"Middle School Chemistry\", \"\\u521d\\u4e2d\\u5316\\u5b66\", \"STEM\"\n ],\n \"veterinary_medicine\": [\n \"Veterinary Medicine\", \"\\u517d\\u533b\\u5b66\", \"STEM\"\n ],\n \"college_economics\": [\n \"College Economics\", \"\\u5927\\u5b66\\u7ecf\\u6d4e\\u5b66\", \"Social Science\"\n ],\n \"business_administration\": [\n \"Business Administration\", \"\\u5de5\\u5546\\u7ba1\\u7406\", \"Social Science\"\n ],\n \"marxism\": [\n \"Marxism\", \"\\u9a6c\\u514b\\u601d\\u4e3b\\u4e49\\u57fa\\u672c\\u539f\\u7406\",\n \"Social Science\"\n ],\n \"mao_zedong_thought\": [\n \"Mao Zedong Thought\",\n \"\\u6bdb\\u6cfd\\u4e1c\\u601d\\u60f3\\u548c\\u4e2d\\u56fd\\u7279\\u8272\\u793e\\u4f1a\\u4e3b\\u4e49\\u7406\\u8bba\\u4f53\\u7cfb\\u6982\\u8bba\",\n \"Social Science\"\n ],\n \"education_science\": [\n \"Education Science\", \"\\u6559\\u80b2\\u5b66\", \"Social Science\"\n ],\n \"teacher_qualification\": [\n \"Teacher Qualification\", \"\\u6559\\u5e08\\u8d44\\u683c\", \"Social Science\"\n ],\n \"high_school_politics\": [\n \"High School Politics\", \"\\u9ad8\\u4e2d\\u653f\\u6cbb\", \"Social Science\"\n ],\n \"high_school_geography\": [\n \"High School Geography\", \"\\u9ad8\\u4e2d\\u5730\\u7406\", \"Social Science\"\n ],\n \"middle_school_politics\": [\n \"Middle School Politics\", \"\\u521d\\u4e2d\\u653f\\u6cbb\", \"Social Science\"\n ],\n \"middle_school_geography\": [\n \"Middle School Geography\", \"\\u521d\\u4e2d\\u5730\\u7406\", \"Social Science\"\n ],\n 
\"modern_chinese_history\":\n [\"Modern Chinese History\", \"\\u8fd1\\u4ee3\\u53f2\\u7eb2\\u8981\", \"Humanities\"],\n \"ideological_and_moral_cultivation\": [\n \"Ideological and Moral Cultivation\",\n \"\\u601d\\u60f3\\u9053\\u5fb7\\u4fee\\u517b\\u4e0e\\u6cd5\\u5f8b\\u57fa\\u7840\",\n \"Humanities\"\n ],\n \"logic\": [\"Logic\", \"\\u903b\\u8f91\\u5b66\", \"Humanities\"],\n \"law\": [\"Law\", \"\\u6cd5\\u5b66\", \"Humanities\"],\n \"chinese_language_and_literature\": [\n \"Chinese Language and Literature\",\n \"\\u4e2d\\u56fd\\u8bed\\u8a00\\u6587\\u5b66\", \"Humanities\"\n ],\n \"art_studies\": [\"Art Studies\", \"\\u827a\\u672f\\u5b66\", \"Humanities\"],\n \"professional_tour_guide\": [\n \"Professional Tour Guide\", \"\\u5bfc\\u6e38\\u8d44\\u683c\", \"Humanities\"\n ],\n \"legal_professional\": [\n \"Legal Professional\", \"\\u6cd5\\u5f8b\\u804c\\u4e1a\\u8d44\\u683c\",\n \"Humanities\"\n ],\n \"high_school_chinese\": [\n \"High School Chinese\", \"\\u9ad8\\u4e2d\\u8bed\\u6587\", \"Humanities\"\n ],\n \"high_school_history\": [\n \"High School History\", \"\\u9ad8\\u4e2d\\u5386\\u53f2\", \"Humanities\"\n ],\n \"middle_school_history\": [\n \"Middle School History\", \"\\u521d\\u4e2d\\u5386\\u53f2\", \"Humanities\"\n ],\n \"civil_servant\": [\"Civil Servant\", \"\\u516c\\u52a1\\u5458\", \"Other\"],\n \"sports_science\": [\"Sports Science\", \"\\u4f53\\u80b2\\u5b66\", \"Other\"],\n \"plant_protection\": [\n \"Plant Protection\", \"\\u690d\\u7269\\u4fdd\\u62a4\", \"Other\"\n ],\n \"basic_medicine\": [\"Basic Medicine\", \"\\u57fa\\u7840\\u533b\\u5b66\", \"Other\"],\n \"clinical_medicine\": [\n \"Clinical Medicine\", \"\\u4e34\\u5e8a\\u533b\\u5b66\", \"Other\"\n ],\n \"urban_and_rural_planner\": [\n \"Urban and Rural Planner\",\n \"\\u6ce8\\u518c\\u57ce\\u4e61\\u89c4\\u5212\\u5e08\", \"Other\"\n ],\n \"accountant\": [\"Accountant\", \"\\u6ce8\\u518c\\u4f1a\\u8ba1\\u5e08\", \"Other\"],\n \"fire_engineer\": [\n \"Fire Engineer\", \"\\u6ce8\\u518c\\u6d88\\u9632\\u5de5\\u7a0b\\u5e08\", \"Other\"\n ],\n \"environmental_impact_assessment_engineer\": [\n \"Environmental Impact Assessment Engineer\",\n \"\\u73af\\u5883\\u5f71\\u54cd\\u8bc4\\u4ef7\\u5de5\\u7a0b\\u5e08\", \"Other\"\n ],\n \"tax_accountant\": [\"Tax Accountant\", \"\\u7a0e\\u52a1\\u5e08\", \"Other\"],\n \"physician\": [\"Physician\", \"\\u533b\\u5e08\\u8d44\\u683c\", \"Other\"]\n }\n raw_data = []\n for name in ceval_subject_mapping:\n val_dataset = load_dataset('csv',\n data_files=osp.join(path, 'val',\n f'{name}_val.csv'),\n split='train')\n for row in val_dataset:\n assert len(row) == 7\n question = row[\"question\"]\n A = row[\"A\"]\n B = row[\"B\"]\n C = row[\"C\"]\n D = row[\"D\"]\n raw_data.append({\n 'question': f\"以下是中国关于{ceval_subject_mapping[name][1]}考试的单项选择题,请选出其中的正确答案。\\n{question}\\nA. {A}\\nB. {B}\\nC. {C}\\nD. {D}\\n答案: \",\n 'A': row[\"A\"],\n 'B': row[\"B\"],\n 'C': row[\"C\"],\n 'D': row[\"D\"],\n 'answer': row[\"answer\"],\n })\n dataset = Dataset.from_list(raw_data)\n\n return dataset"
},
{
"identifier": "AGIEvalDataset",
"path": "lms/runtime/evaluation/benchmark/eval_dataset.py",
"snippet": "class AGIEvalDataset():\n @staticmethod\n def load(path: str = basepath + \"/data/AGIEval/data/v1/\", setting_name: str = \"zero-shot\"):\n agieval_single_choice_sets = [\n 'gaokao-chinese',\n 'gaokao-english',\n 'gaokao-geography',\n 'gaokao-history',\n 'gaokao-biology',\n 'gaokao-chemistry',\n 'gaokao-mathqa',\n 'logiqa-zh',\n 'lsat-ar',\n 'lsat-lr',\n 'lsat-rc',\n 'logiqa-en',\n 'sat-math',\n 'sat-en',\n 'sat-en-without-passage',\n 'aqua-rat',\n ]\n from lms.runtime.evaluation.benchmark.dataset_loader import load_dataset, load_dataset_as_result_schema\n raw_data = []\n for name in agieval_single_choice_sets:\n dataset_wo_label = load_dataset(name, setting_name, path)\n dataset_with_label = load_dataset_as_result_schema(name, path)\n for d1, d2 in zip(dataset_wo_label, dataset_with_label):\n raw_data.append({\n 'id': d2.index,\n 'question': d1['context'],\n 'answer': d2.label,\n })\n\n dataset = Dataset.from_list(raw_data)\n return dataset"
},
{
"identifier": "BBHDataset",
"path": "lms/runtime/evaluation/benchmark/eval_dataset.py",
"snippet": "class BBHDataset():\n @staticmethod\n def load(path: str = basepath + \"/data/BBH\"):\n bbh_multiple_choice_sets = [\n 'temporal_sequences',\n 'disambiguation_qa',\n 'date_understanding',\n 'tracking_shuffled_objects_three_objects',\n 'penguins_in_a_table',\n 'geometric_shapes',\n 'snarks',\n 'ruin_names',\n 'tracking_shuffled_objects_seven_objects',\n 'tracking_shuffled_objects_five_objects',\n 'logical_deduction_three_objects',\n 'hyperbaton',\n 'logical_deduction_five_objects',\n 'logical_deduction_seven_objects',\n 'movie_recommendation',\n 'salient_translation_error_detection',\n 'reasoning_about_colored_objects',\n ]\n raw_data = []\n for name in bbh_multiple_choice_sets:\n _hint = None\n with open(osp.join(path + \"/data\", f'{name}.json'), 'r') as f:\n data = json.load(f)['examples']\n if exists(f\"{path}/lib_prompt/{name}.txt\"):\n _hint = open(f\"{path}/lib_prompt/{name}.txt\", 'r').read()\n for row in data:\n assert len(row) == 2\n question = row[\"input\"]\n raw_data.append({\n 'question': f\"Follow the given examples and answer the question.\\n{_hint}\\n\\nQ: {question}\\nA: Let's think step by step.\",\n 'answer': row[\"target\"],\n })\n dataset = Dataset.from_list(raw_data)\n return dataset"
},
{
"identifier": "AccEvaluator",
"path": "lms/runtime/evaluation/benchmark/eval_metric.py",
"snippet": "class AccEvaluator():\n \"\"\"Accuracy evaluator.\"\"\"\n\n def __init__(self, metric: str = \"./accuracy.py\", seed: int = 0) -> None:\n self.metric = metric\n self.seed = seed\n super().__init__()\n\n def _preprocess(self, predictions: List, references: List) -> dict:\n \"\"\"Preprocess the final predictions and references to needed format.\n\n Args:\n predictions (List): List of predictions of each sample.\n references (List): List of targets for each sample.\n\n Returns:\n dict: preprocessed results.\n \"\"\"\n mapping_to_int_dict = {\n label: idx\n for idx, label in enumerate(set(map(str, references)))\n }\n pred_set = set(predictions)\n for pred in pred_set:\n if str(pred) not in mapping_to_int_dict.keys():\n mapping_to_int_dict[str(pred)] = len(mapping_to_int_dict)\n golds = [mapping_to_int_dict[str(gold)] for gold in references]\n preds = [mapping_to_int_dict[str(pred)] for pred in predictions]\n return {\n 'predictions': preds,\n 'references': golds,\n }\n\n def first_capital_postprocess(self, text, index=3, value=[\"A\",\"B\",\"C\",\"D\"]):\n for t in text:\n if t in value:\n return t\n try:\n return value[ord(text.strip()[0])%4]\n except:\n return value[index] \n\n def eval(self, predictions: List, references: List) -> dict:\n \"\"\"Calculate scores.\n\n Args:\n predictions (List): List of predictions of each sample.\n references (List): List of targets for each sample.\n\n Returns:\n dict: calculated scores.\n \"\"\"\n random_state = random.getstate()\n np_random_state = np.random.get_state()\n\n random.seed(self.seed)\n np.random.seed(self.seed)\n if len(predictions) != len(references):\n return {\n 'error':\n 'predictions and references have different '\n f'length. len(predictions): {len(predictions)}, '\n f'len(references): {len(references)}'\n }\n metric = evaluate.load(self.metric)\n predictions = list(map(self.first_capital_postprocess, predictions))\n scores = metric.compute(**self._preprocess(predictions, references))\n random.setstate(random_state)\n np.random.set_state(np_random_state)\n result = {}\n result[\"acc\"] = round(scores['accuracy'],2)\n return result"
},
{
"identifier": "MCAccEvaluator",
"path": "lms/runtime/evaluation/benchmark/eval_metric.py",
"snippet": "class MCAccEvaluator():\n \"\"\"Accuracy evaluator.\"\"\"\n\n def __init__(self, metric: str = \"./accuracy.py\", seed: int = 0) -> None:\n self.metric = metric\n self.seed = seed\n super().__init__()\n\n def _preprocess(self, predictions: List, references: List) -> dict:\n \"\"\"Preprocess the final predictions and references to needed format.\n\n Args:\n predictions (List): List of predictions of each sample.\n references (List): List of targets for each sample.\n\n Returns:\n dict: preprocessed results.\n \"\"\"\n mapping_to_int_dict = {\n label: idx\n for idx, label in enumerate(set(map(str, references)))\n }\n pred_set = set(predictions)\n for pred in pred_set:\n if str(pred) not in mapping_to_int_dict.keys():\n mapping_to_int_dict[str(pred)] = len(mapping_to_int_dict)\n golds = [mapping_to_int_dict[str(gold)] for gold in references]\n preds = [mapping_to_int_dict[str(pred)] for pred in predictions]\n return {\n 'predictions': preds,\n 'references': golds,\n }\n\n def bbh_mcq_postprocess(self,text, index=0, value=[\"(A)\",\"(B)\",\"(C)\"]) :\n ans = text\n ans_line = ans.split('answer is ')\n if len(ans_line) != 1:\n ans = ans_line[1].strip()\n match = re.search(r'\\(([A-Z])\\)*', ans)\n if match:\n return match.group()\n try:\n return value[ord(text.strip()[0])%3]\n except:\n return value[index] \n\n def eval(self, predictions: List, references: List) -> dict:\n \"\"\"Calculate scores.\n\n Args:\n predictions (List): List of predictions of each sample.\n references (List): List of targets for each sample.\n\n Returns:\n dict: calculated scores.\n \"\"\"\n random_state = random.getstate()\n np_random_state = np.random.get_state()\n\n random.seed(self.seed)\n np.random.seed(self.seed)\n if len(predictions) != len(references):\n return {\n 'error':\n 'predictions and references have different '\n f'length. len(predictions): {len(predictions)}, '\n f'len(references): {len(references)}'\n }\n metric = evaluate.load(self.metric)\n predictions = list(map(self.bbh_mcq_postprocess, predictions))\n scores = metric.compute(**self._preprocess(predictions, references))\n random.setstate(random_state)\n np.random.set_state(np_random_state)\n result = {}\n result[\"acc\"] = round(scores['accuracy'],2)\n return result"
}
] | import os
import torch
import argparse
import json
from lms.runtime.evaluation.benchmark.eval_dataset import ARCDataset, MMLUDataset, CMMLUDataset, CEvalDataset, \
AGIEvalDataset, \
BBHDataset
from lms.runtime.evaluation.benchmark.eval_metric import AccEvaluator, MCAccEvaluator
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline, pipeline | 7,340 |
def parse_args():
parser = argparse.ArgumentParser(description='Run an evaluation task')
parser.add_argument('--model_path', help='model_path')
parser.add_argument('--task', help='task')
parser.add_argument('--output_path', help='output_path')
args = parser.parse_args()
return args
def trunk(text, text_length=800):
return str(text[len(text) - text_length:])
def infer(model_path, datalist,task):
try:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True)
pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer, torch_dtype=torch.float16)
except:
pipe = pipeline("text2text-generation", model=model_path, device_map="auto",trust_remote_code=True, torch_dtype=torch.float16)
predict = []
datalist = list(map(trunk, datalist))
for text in tqdm(datalist):
if task=="BigBench":
out = pipe(text, max_new_tokens=32)
else:
out = pipe(text, max_new_tokens=4)
predict.append(out[0]["generated_text"][len(text):])
return predict
task_map = {"ARC": ARCDataset, "MMLU": MMLUDataset, "CMMLU": CMMLUDataset, "ceval": CEvalDataset,
"AGIEval": AGIEvalDataset, "BigBench": BBHDataset}
|
def parse_args():
parser = argparse.ArgumentParser(description='Run an evaluation task')
parser.add_argument('--model_path', help='model_path')
parser.add_argument('--task', help='task')
parser.add_argument('--output_path', help='output_path')
args = parser.parse_args()
return args
def trunk(text, text_length=800):
return str(text[len(text) - text_length:])
def infer(model_path, datalist,task):
try:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True)
pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer, torch_dtype=torch.float16)
except:
pipe = pipeline("text2text-generation", model=model_path, device_map="auto",trust_remote_code=True, torch_dtype=torch.float16)
predict = []
datalist = list(map(trunk, datalist))
for text in tqdm(datalist):
if task=="BigBench":
out = pipe(text, max_new_tokens=32)
else:
out = pipe(text, max_new_tokens=4)
predict.append(out[0]["generated_text"][len(text):])
return predict
task_map = {"ARC": ARCDataset, "MMLU": MMLUDataset, "CMMLU": CMMLUDataset, "ceval": CEvalDataset,
"AGIEval": AGIEvalDataset, "BigBench": BBHDataset} | eval_map = {"ARC": AccEvaluator, "MMLU": AccEvaluator, "CMMLU": AccEvaluator, "ceval": AccEvaluator, | 6 | 2023-10-30 10:50:32+00:00 | 12k |
aws-samples/amazon-bedrock-serverless-prompt-chaining | cdk_stacks.py | [
{
"identifier": "WebappStack",
"path": "stacks/webapp_stack.py",
"snippet": "class WebappStack(Stack):\n def __init__(\n self, scope: Construct, construct_id: str, parent_domain: str, **kwargs\n ) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Set up load-balanced HTTPS Fargate service\n vpc = ec2.Vpc(\n self,\n \"VPC\",\n max_azs=2,\n )\n\n domain_name = f\"bedrock-serverless-prompt-chaining.{parent_domain}\"\n hosted_zone = route53.HostedZone.from_lookup(\n self, \"Zone\", domain_name=parent_domain\n )\n certificate = acm.Certificate(\n self,\n \"Cert\",\n domain_name=domain_name,\n validation=acm.CertificateValidation.from_dns(hosted_zone=hosted_zone),\n )\n\n cluster = ecs.Cluster(self, \"Cluster\", vpc=vpc)\n\n image = ecs.ContainerImage.from_asset(\".\")\n\n fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(\n self,\n \"StreamlitService\",\n cluster=cluster,\n task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(\n image=image, container_port=8501 # 8501 is the default Streamlit port\n ),\n public_load_balancer=True,\n domain_name=domain_name,\n domain_zone=hosted_zone,\n certificate=certificate,\n )\n\n # Configure Streamlit's health check\n fargate_service.target_group.configure_health_check(\n enabled=True, path=\"/_stcore/health\", healthy_http_codes=\"200\"\n )\n\n # Speed up deployments\n fargate_service.target_group.set_attribute(\n key=\"deregistration_delay.timeout_seconds\",\n value=\"10\",\n )\n\n # Grant access to start and query Step Functions exections\n for name_suffix in [\n \"BlogPost\",\n \"TripPlanner\",\n \"StoryWriter\",\n \"MoviePitch\",\n \"MealPlanner\",\n \"MostPopularRepoBedrockAgents\",\n \"MostPopularRepoLangchain\",\n ]:\n workflow = sfn.StateMachine.from_state_machine_name(\n self, f\"{name_suffix}Workflow\", f\"PromptChainDemo-{name_suffix}\"\n )\n workflow.grant_read(fargate_service.task_definition.task_role)\n workflow.grant_start_execution(fargate_service.task_definition.task_role)\n workflow.grant_task_response(fargate_service.task_definition.task_role)\n\n # Add Cognito for authentication\n cognito_domain_prefix = \"bedrock-serverless-prompt-chaining-demo\"\n user_pool = cognito.UserPool(\n self,\n \"StreamlitUserPool\",\n user_pool_name=cognito_domain_prefix,\n removal_policy=RemovalPolicy.DESTROY,\n account_recovery=cognito.AccountRecovery.NONE,\n auto_verify=cognito.AutoVerifiedAttrs(email=True),\n sign_in_aliases=cognito.SignInAliases(email=True),\n self_sign_up_enabled=False,\n password_policy={\n \"min_length\": 12,\n \"require_lowercase\": False,\n \"require_digits\": False,\n \"require_uppercase\": False,\n \"require_symbols\": False,\n },\n )\n\n user_pool_domain = cognito.UserPoolDomain(\n self,\n \"StreamlitUserPoolDomain\",\n user_pool=user_pool,\n cognito_domain=cognito.CognitoDomainOptions(\n domain_prefix=cognito_domain_prefix\n ),\n )\n\n user_pool_client = user_pool.add_client(\n \"StreamlitAlbAppClient\",\n user_pool_client_name=\"StreamlitAlbAuthentication\",\n generate_secret=True,\n auth_flows=cognito.AuthFlow(user_password=True),\n o_auth=cognito.OAuthSettings(\n callback_urls=[\n f\"https://{domain_name}/oauth2/idpresponse\",\n f\"https://{domain_name}\",\n ],\n flows=cognito.OAuthFlows(authorization_code_grant=True),\n scopes=[cognito.OAuthScope.EMAIL],\n logout_urls=[f\"https://{domain_name}\"],\n ),\n prevent_user_existence_errors=True,\n supported_identity_providers=[\n cognito.UserPoolClientIdentityProvider.COGNITO\n ],\n )\n\n fargate_service.listener.add_action(\n \"authenticate-rule\",\n priority=1000,\n 
action=elb_actions.AuthenticateCognitoAction(\n next=elb.ListenerAction.forward(\n target_groups=[fargate_service.target_group]\n ),\n user_pool=user_pool,\n user_pool_client=user_pool_client,\n user_pool_domain=user_pool_domain,\n ),\n conditions=[elb.ListenerCondition.host_headers([domain_name])],\n )\n\n # Let the load balancer talk to the OIDC provider\n lb_security_group = fargate_service.load_balancer.connections.security_groups[0]\n lb_security_group.add_egress_rule(\n peer=ec2.Peer.any_ipv4(),\n connection=ec2.Port(\n protocol=ec2.Protocol.TCP,\n string_representation=\"443\",\n from_port=443,\n to_port=443,\n ),\n description=\"Outbound HTTPS traffic to the OIDC provider\",\n )\n\n # Disallow accessing the load balancer URL directly\n cfn_listener: elb.CfnListener = fargate_service.listener.node.default_child\n cfn_listener.default_actions = [\n {\n \"type\": \"fixed-response\",\n \"fixedResponseConfig\": {\n \"statusCode\": \"403\",\n \"contentType\": \"text/plain\",\n \"messageBody\": \"This is not a valid endpoint!\",\n },\n }\n ]"
},
{
"identifier": "BlogPostStack",
"path": "stacks/blog_post_stack.py",
"snippet": "class BlogPostStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Agent #1: write book summary\n summary_job = get_claude_instant_invoke_chain(\n self,\n \"Write a Summary\",\n prompt=sfn.JsonPath.format(\n \"Write a 1-2 sentence summary for the book {}.\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.novel\"),\n ),\n include_previous_conversation_in_prompt=False,\n )\n\n # Agent #2: describe the plot\n plot_job = get_claude_instant_invoke_chain(\n self,\n \"Describe the Plot\",\n prompt=sfn.JsonPath.format(\n \"Write a paragraph describing the plot of the book {}.\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.novel\"),\n ),\n )\n\n # Agent #3: analyze key themes\n themes_job = get_claude_instant_invoke_chain(\n self,\n \"Analyze Key Themes\",\n prompt=sfn.JsonPath.format(\n \"Write a paragraph analyzing the key themes of the book {}.\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.novel\"),\n ),\n )\n\n # Agent #4: analyze writing style\n writing_style_job = get_claude_instant_invoke_chain(\n self,\n \"Analyze Writing Style\",\n prompt=sfn.JsonPath.format(\n \"Write a paragraph discussing the writing style and tone of the book {}.\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.novel\"),\n ),\n )\n\n # Agent #5: write the blog post\n blog_post_job = get_claude_instant_invoke_chain(\n self,\n \"Write the Blog Post\",\n prompt=sfn.JsonPath.format(\n (\n 'Combine your previous responses into a blog post titled \"{} - A Literature Review\" for my literature blog. '\n \"Start the blog post with an introductory paragraph at the beginning and a conclusion paragraph at the end. \"\n \"The blog post should be five paragraphs in total.\"\n ),\n sfn.JsonPath.string_at(\"$$.Execution.Input.novel\"),\n ),\n max_tokens_to_sample=1000,\n )\n\n select_final_answer = sfn.Pass(\n scope,\n \"Select Final Answer\",\n output_path=\"$.output.response\",\n )\n\n # Hook the agents together into simple pipeline\n chain = (\n summary_job.next(plot_job)\n .next(themes_job)\n .next(writing_style_job)\n .next(blog_post_job)\n .next(select_final_answer)\n )\n\n sfn.StateMachine(\n self,\n \"BlogPostWorkflow\",\n state_machine_name=\"PromptChainDemo-BlogPost\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n timeout=Duration.seconds(300),\n )"
},
{
"identifier": "TripPlannerStack",
"path": "stacks/trip_planner_stack.py",
"snippet": "class TripPlannerStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Agent #1: suggest places to stay\n hotels_job = get_claude_instant_invoke_chain(\n self,\n \"Suggest Hotels\",\n prompt=sfn.JsonPath.format(\n \"\"\"You are a world-class travel agent and an expert on travel to {}.\nI am going on a weekend vacation to {}.\nPlease give me up to 5 suggestions for hotels for my vacation.\"\"\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n ),\n max_tokens_to_sample=512,\n include_previous_conversation_in_prompt=False,\n )\n\n # Agent #2: suggest places to eat\n restaurants_job = get_claude_instant_invoke_chain(\n self,\n \"Suggest Restaurants\",\n prompt=sfn.JsonPath.format(\n \"\"\"You are a world-class travel agent and an expert on travel to {}.\nI am going on a weekend vacation to {}.\nPlease give me suggestions for restaurants for my vacation, including up to 5 suggestions for breakfast, lunch, and dinner.\"\"\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n ),\n max_tokens_to_sample=512,\n include_previous_conversation_in_prompt=False,\n )\n\n # Agent #3: suggest places to visit\n activities_job = get_claude_instant_invoke_chain(\n self,\n \"Suggest Activities\",\n prompt=sfn.JsonPath.format(\n \"\"\"You are a world-class travel agent and an expert on travel to {}.\nI am going on a weekend vacation to {}.\nPlease give me up to 5 suggestions for activities to do or places to visit during my vacation.\"\"\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n ),\n max_tokens_to_sample=512,\n include_previous_conversation_in_prompt=False,\n )\n\n # Agent #4: form an itinerary\n itinerary_job = get_claude_instant_invoke_chain(\n self,\n \"Create an Itinerary\",\n prompt=sfn.JsonPath.format(\n \"\"\"You are a world-class travel agent and an expert on travel to {}.\nI am going on a weekend vacation to {} (arriving Friday, leaving Sunday).\n\nYou previously recommended these hotels, inside <hotels></hotels> XML tags.\n<hotels>\n{}\n</hotels>\n\nYou previously recommended these restaurants, inside <restaurants></restaurants> XML tags.\n<restaurants>\n{}\n</restaurants>\n\nYou previously recommended these activities, inside <activities></activities> XML tags.\n<activities>\n{}\n</activities>\n\nPlease give me a daily itinerary for my three-day vacation, based on your previous recommendations.\nThe itinerary should include one hotel where I will stay for the duration of the vacation.\nEach of the three days in the itinerary should have one activity, one restaurant for breakfast, one restaurant for lunch, and one restaurant for dinner.\nEach entry in the itinerary should include a short description of your recommended hotel, activity, or restaurant.\nThe itinerary should be formatted in Markdown format.\"\"\",\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n sfn.JsonPath.string_at(\"$.hotels\"),\n sfn.JsonPath.string_at(\"$.restaurants\"),\n sfn.JsonPath.string_at(\"$.activities\"),\n ),\n max_tokens_to_sample=512,\n include_previous_conversation_in_prompt=False,\n )\n\n # Final step: Create the itinerary PDF\n pdf_bucket = s3.Bucket(\n self,\n \"PdfBucket\",\n removal_policy=RemovalPolicy.DESTROY,\n 
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,\n lifecycle_rules=[\n s3.LifecycleRule(\n id=\"clean-up-itinerary-files\",\n expiration=Duration.days(1),\n abort_incomplete_multipart_upload_after=Duration.days(1),\n noncurrent_version_expiration=Duration.days(1),\n noncurrent_versions_to_retain=5,\n )\n ],\n )\n\n weasyprint_layer = lambda_.LayerVersion.from_layer_version_arn(\n self,\n \"WeasyprintLayer\",\n layer_version_arn=ssm.StringParameter.value_for_string_parameter(\n self, parameter_name=\"WeasyprintLambdaLayer\"\n ),\n )\n\n pdf_lambda = lambda_python.PythonFunction(\n self,\n \"PdfCreator\",\n runtime=lambda_.Runtime.PYTHON_3_8, # This must be Python 3.8 for the Weasyprint layer\n entry=\"agents/trip_planner/pdf_creator\",\n bundling=get_lambda_bundling_options(),\n environment={\n \"PDF_BUCKET\": pdf_bucket.bucket_name,\n \"GDK_PIXBUF_MODULE_FILE\": \"/opt/lib/loaders.cache\",\n \"FONTCONFIG_PATH\": \"/opt/fonts\",\n \"XDG_DATA_DIRS\": \"/opt/lib\",\n },\n timeout=Duration.seconds(30),\n memory_size=1024,\n layers=[weasyprint_layer],\n )\n\n pdf_bucket.grant_put(pdf_lambda)\n pdf_bucket.grant_read(pdf_lambda)\n\n pdf_job = tasks.LambdaInvoke(\n self,\n \"Upload the Itinerary\",\n lambda_function=pdf_lambda,\n output_path=\"$.Payload\",\n payload=sfn.TaskInput.from_object(\n {\n \"location\": sfn.JsonPath.string_at(\"$$.Execution.Input.location\"),\n \"itinerary\": sfn.JsonPath.string_at(\"$.output.response\"),\n }\n ),\n )\n\n # Hook the agents together into a workflow that contains some parallel steps\n chain = (\n (\n sfn.Parallel(\n self,\n \"Suggestions\",\n result_selector={\n \"hotels.$\": \"$[0].output.response\",\n \"restaurants.$\": \"$[1].output.response\",\n \"activities.$\": \"$[2].output.response\",\n },\n )\n .branch(hotels_job)\n .branch(restaurants_job)\n .branch(activities_job)\n )\n .next(itinerary_job)\n .next(pdf_job)\n )\n\n sfn.StateMachine(\n self,\n \"TripPlannerWorkflow\",\n state_machine_name=\"PromptChainDemo-TripPlanner\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n timeout=Duration.seconds(300),\n )"
},
{
"identifier": "StoryWriterStack",
"path": "stacks/story_writer_stack.py",
"snippet": "class StoryWriterStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Agent #1: create characters\n characters_lambda = lambda_python.PythonFunction(\n self,\n \"CharacterAgent\",\n entry=\"agents/story_writer/characters_agent\",\n bundling=get_lambda_bundling_options(),\n runtime=lambda_.Runtime.PYTHON_3_9,\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n characters_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n characters_job = tasks.LambdaInvoke(\n self,\n \"Generate Characters\",\n lambda_function=characters_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(characters_job)\n\n # Agent #2: create character story arc\n character_story_lambda = lambda_python.PythonFunction(\n self,\n \"CharacterStoryAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/story_writer/character_story_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n character_story_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n character_story_job = tasks.LambdaInvoke(\n self,\n \"Generate Character Story Arc\",\n lambda_function=character_story_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(character_story_job)\n\n # Agent #3: write the story\n story_lambda = lambda_python.PythonFunction(\n self,\n \"StoryAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/story_writer/story_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n story_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n story_job = tasks.LambdaInvoke(\n self,\n \"Generate the Full Story\",\n lambda_function=story_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(story_job)\n\n # Hook the agents together into a workflow that contains some loops\n chain = characters_job.next(\n sfn.Map(\n self,\n \"Character Story Map\",\n items_path=sfn.JsonPath.string_at(\"$.characters\"),\n parameters={\n \"character.$\": \"$$.Map.Item.Value\",\n \"characters.$\": \"$.characters\",\n \"story_description.$\": \"$.story_description\",\n },\n max_concurrency=3,\n ).iterator(character_story_job)\n ).next(story_job)\n\n sfn.StateMachine(\n self,\n \"StoryWriterWorkflow\",\n state_machine_name=\"PromptChainDemo-StoryWriter\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n timeout=Duration.seconds(300),\n )"
},
{
"identifier": "MoviePitchStack",
"path": "stacks/movie_pitch_stack.py",
"snippet": "class MoviePitchStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Agent #1: generate movie pitch options\n movie_pitch_generators = sfn.Parallel(self, \"MoviePitches\")\n\n pitch_lambda = lambda_python.PythonFunction(\n self,\n f\"PitchAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/movie_pitch/pitch_generator_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n pitch_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n temperature_settings = OrderedDict([(\"Low\", 0), (\"Medium\", 0.5), (\"High\", 1)])\n for temperature_name, temperature_value in temperature_settings.items():\n pitch_job = tasks.LambdaInvoke(\n self,\n f\"Generate Movie Pitch ({temperature_name})\",\n lambda_function=pitch_lambda,\n payload=sfn.TaskInput.from_object(\n {\n \"temperature\": temperature_value,\n \"movie_description\": sfn.JsonPath.string_at(\n \"$.movie_description\"\n ),\n }\n ),\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(pitch_job)\n movie_pitch_generators = movie_pitch_generators.branch(pitch_job)\n\n # Agent #2: choose best one\n pitch_chooser_lambda = lambda_python.PythonFunction(\n self,\n \"PitchChooserAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/movie_pitch/pitch_chooser_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n pitch_chooser_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n pitch_chooser_job = tasks.LambdaInvoke(\n self,\n \"Choose Best Movie Pitch\",\n lambda_function=pitch_chooser_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(pitch_chooser_job)\n\n # Next step: create a task token so that the user can decide whether to accept\n # the movie pitch or not.\n user_choice_lambda = lambda_python.PythonFunction(\n self,\n \"UserProducerChoiceAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/movie_pitch/seek_user_input\",\n timeout=Duration.seconds(5),\n memory_size=128,\n )\n user_choice_job = tasks.LambdaInvoke(\n self,\n \"Pitch to the Movie Producer\",\n lambda_function=user_choice_lambda,\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n payload=sfn.TaskInput.from_object(\n {\n \"token\": sfn.JsonPath.task_token,\n \"input.$\": \"$\",\n }\n ),\n output_path=\"$.Payload\",\n )\n\n # Agent #3: develop the movie idea into a one-pager\n pitch_one_pager_lambda = lambda_python.PythonFunction(\n self,\n \"OnePagerPitchAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/movie_pitch/pitch_one_pager_generator\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n pitch_one_pager_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n pitch_one_pager_job = tasks.LambdaInvoke(\n self,\n \"Generate Movie Pitch One-Pager\",\n lambda_function=pitch_one_pager_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(pitch_one_pager_job)\n\n # Hook the agents together into a workflow\n user_choice_fork = (\n sfn.Choice(self, \"Greenlight?\")\n .when(\n sfn.Condition.string_equals(\"$.user_choice\", \"yes\"), pitch_one_pager_job\n )\n .when(\n sfn.Condition.string_equals(\"$.user_choice\", \"no\"),\n movie_pitch_generators,\n )\n .otherwise(\n sfn.Fail(\n self,\n \"Job Failed\",\n cause=\"Unknown user choice\",\n error=\"Unknown user choice\",\n )\n )\n )\n chain = (\n 
movie_pitch_generators.next(pitch_chooser_job)\n .next(user_choice_job)\n .next(user_choice_fork)\n )\n\n sfn.StateMachine(\n self,\n \"MoviePitchWorkflow\",\n state_machine_name=\"PromptChainDemo-MoviePitch\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n # 1 hour to account for getting user feedback in the UI\n timeout=Duration.seconds(3600),\n )"
},
{
"identifier": "MealPlannerStack",
"path": "stacks/meal_planner_stack.py",
"snippet": "class MealPlannerStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Agent #1: generate initial meal options from \"red\" and \"blue\" chefs\n initial_meal_generators = sfn.Parallel(\n self,\n \"Meals\",\n result_path=\"$.generated_meals\",\n )\n\n meal_lambda = lambda_python.PythonFunction(\n self,\n f\"MealAgent\",\n entry=\"agents/meal_planner/meal_generator_agent\",\n bundling=get_lambda_bundling_options(),\n runtime=lambda_.Runtime.PYTHON_3_9,\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n meal_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n red_agent_meal_job = tasks.LambdaInvoke(\n self,\n f\"Initial Meal Idea (Red)\",\n lambda_function=meal_lambda,\n output_path=\"$.Payload\",\n payload=sfn.TaskInput.from_object(\n {\n \"agent\": \"red\",\n \"input\": sfn.JsonPath.object_at(\"$\"),\n }\n ),\n )\n add_bedrock_retries(red_agent_meal_job)\n\n blue_agent_meal_job = tasks.LambdaInvoke(\n self,\n f\"Initial Meal Idea (Blue)\",\n lambda_function=meal_lambda,\n output_path=\"$.Payload\",\n payload=sfn.TaskInput.from_object(\n {\n \"agent\": \"blue\",\n \"input\": sfn.JsonPath.object_at(\"$\"),\n }\n ),\n )\n add_bedrock_retries(blue_agent_meal_job)\n\n initial_meal_generators = initial_meal_generators.branch(\n red_agent_meal_job\n ).branch(blue_agent_meal_job)\n\n # Agent #2: score the meals generated\n meal_scoring_lambda = lambda_python.PythonFunction(\n self,\n \"MealScoringAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/meal_planner/meal_scoring_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n meal_scoring_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n meal_scoring_job = tasks.LambdaInvoke(\n self,\n \"Score Meals\",\n lambda_function=meal_scoring_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(meal_scoring_job)\n\n # Agent #3: generate new meal options from \"red\" and \"blue\" chefs via debate\n meal_debaters = sfn.Parallel(\n self,\n \"MealDebaters\",\n result_path=\"$.latest_debate_round\",\n )\n\n meal_debater_lambda = lambda_python.PythonFunction(\n self,\n f\"MealDebaterAgent\",\n entry=\"agents/meal_planner/meal_debater_agent\",\n bundling=get_lambda_bundling_options(),\n runtime=lambda_.Runtime.PYTHON_3_9,\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n meal_debater_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n red_agent_meal_debater_job = tasks.LambdaInvoke(\n self,\n f\"Debate Meal (Red)\",\n lambda_function=meal_debater_lambda,\n output_path=\"$.Payload\",\n payload=sfn.TaskInput.from_object(\n {\n \"agent\": \"red\",\n \"input\": sfn.JsonPath.object_at(\"$\"),\n }\n ),\n )\n add_bedrock_retries(red_agent_meal_debater_job)\n\n blue_agent_meal_debater_job = tasks.LambdaInvoke(\n self,\n f\"Debate Meal (Blue)\",\n lambda_function=meal_debater_lambda,\n output_path=\"$.Payload\",\n payload=sfn.TaskInput.from_object(\n {\n \"agent\": \"blue\",\n \"input\": sfn.JsonPath.object_at(\"$\"),\n }\n ),\n )\n add_bedrock_retries(blue_agent_meal_debater_job)\n\n meal_debaters = meal_debaters.branch(red_agent_meal_debater_job).branch(\n blue_agent_meal_debater_job\n )\n\n # Agent #4: determine if there is consensus or if we need another debate round\n meal_debate_referee_lambda = lambda_python.PythonFunction(\n self,\n \"MealDebateRefereeAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n 
entry=\"agents/meal_planner/meal_debate_referee_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n meal_debate_referee_lambda.add_to_role_policy(\n get_bedrock_iam_policy_statement()\n )\n\n meal_debate_referee_job = tasks.LambdaInvoke(\n self,\n \"Referee Meal Debate\",\n lambda_function=meal_debate_referee_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(meal_debate_referee_job)\n\n # Agent #5: produce a final score for the final meal ideas from each chef\n final_meal_scoring_job = tasks.LambdaInvoke(\n self,\n \"Score Final Meals\",\n lambda_function=meal_scoring_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(final_meal_scoring_job)\n\n # Agent #6: choose the highest scoring meal\n meal_choose_winner_lambda = lambda_python.PythonFunction(\n self,\n \"MealChooseAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/meal_planner/meal_choose_winner_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n\n meal_choose_winner_job = tasks.LambdaInvoke(\n self,\n \"Choose Winning Meal\",\n lambda_function=meal_choose_winner_lambda,\n output_path=\"$.Payload\",\n )\n\n # Agent #7: generate a recipe for the meal\n recipe_lambda = lambda_python.PythonFunction(\n self,\n \"RecipeAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/meal_planner/recipe_generator\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=256,\n )\n recipe_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n recipe_job = tasks.LambdaInvoke(\n self,\n \"Generate Recipe\",\n lambda_function=recipe_lambda,\n output_path=\"$.Payload\",\n )\n add_bedrock_retries(recipe_job)\n\n # Hook the agents together into a workflow\n meal_consensus_fork = (\n sfn.Choice(self, \"Consensus reached?\")\n .when(\n sfn.Condition.or_(\n sfn.Condition.string_equals(\"$.consensus\", \"yes\"),\n sfn.Condition.string_equals(\n \"$.consensus\", \"max debate rounds reached\"\n ),\n ),\n final_meal_scoring_job.next(meal_choose_winner_job).next(recipe_job),\n )\n .when(\n sfn.Condition.string_equals(\"$.consensus\", \"no\"),\n meal_scoring_job,\n )\n )\n chain = (\n initial_meal_generators.next(meal_scoring_job)\n .next(meal_debaters)\n .next(meal_debate_referee_job)\n .next(meal_consensus_fork)\n )\n\n sfn.StateMachine(\n self,\n \"MealPlannerWorkflow\",\n state_machine_name=\"PromptChainDemo-MealPlanner\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n timeout=Duration.seconds(300),\n )"
},
{
"identifier": "MostPopularRepoBedrockAgentStack",
"path": "stacks/most_popular_repo_bedrock_agent_stack.py",
"snippet": "class MostPopularRepoBedrockAgentStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n ### Setup for Bedrock Agent ###\n bedrock_agent_access_policy = iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n actions=[\n \"bedrock:InvokeAgent\",\n ],\n resources=[\n \"*\",\n ],\n )\n\n bedrock_agent_service_role = iam.Role(\n self,\n \"BedrockAgentServiceRole\",\n role_name=\"AmazonBedrockExecutionRoleForAgents_BedrockServerlessPromptChain\",\n assumed_by=iam.ServicePrincipal(\"bedrock.amazonaws.com\"),\n )\n\n bedrock_agent_service_role.add_to_policy(\n iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n actions=[\n \"bedrock:InvokeModel\",\n ],\n resources=[\n f\"arn:aws:bedrock:{self.region}::foundation-model/anthropic.claude-v2\",\n f\"arn:aws:bedrock:{self.region}::foundation-model/anthropic.claude-v2:1\",\n f\"arn:aws:bedrock:{self.region}::foundation-model/anthropic.claude-instant-v1\",\n ],\n )\n )\n\n github_agent_actions_lambda = lambda_python.PythonFunction(\n self,\n \"GitHubAgentActions\",\n function_name=\"PromptChainDemo-MostPopularRepoBedrockAgents-GitHubActions\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/most_popular_repo_bedrock_agent/github_agent_actions\",\n timeout=Duration.seconds(60),\n memory_size=512,\n )\n bedrock_principal = iam.ServicePrincipal(\n \"bedrock.amazonaws.com\",\n conditions={\n \"StringEquals\": {\"aws:SourceAccount\": self.account},\n \"ArnLike\": {\n \"aws:SourceArn\": f\"arn:aws:bedrock:{self.region}:{self.account}:agent/*\"\n },\n },\n )\n github_agent_actions_lambda.grant_invoke(bedrock_principal)\n\n agent_action_schema_asset = assets.Asset(\n self,\n \"AgentActionSchema\",\n path=os.path.join(\n dirname,\n \"../agents/most_popular_repo_bedrock_agent/github_agent_actions/openapi-schema.yaml\",\n ),\n )\n agent_action_schema_asset.grant_read(bedrock_agent_service_role)\n CfnOutput(\n self,\n \"BedrockAgentActionSchema\",\n value=agent_action_schema_asset.s3_object_url,\n )\n\n ### Agents and Workflow ###\n\n # Agent #1: look up the highest trending repo on GitHub\n lookup_repo_lambda = lambda_python.PythonFunction(\n self,\n \"LookupRepoAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/most_popular_repo_bedrock_agent/agent\",\n handler=\"lookup_trending_repo_agent\",\n bundling=lambda_python.BundlingOptions(\n asset_excludes=[\".venv\", \".mypy_cache\", \"__pycache__\"],\n ),\n timeout=Duration.seconds(60),\n memory_size=512,\n environment={\n \"BEDROCK_AGENT_ID\": \"INSERT BEDROCK AGENT ID HERE\",\n \"BEDROCK_AGENT_ALIAS_ID\": \"INSERT BEDROCK AGENT ALIAS ID HERE\",\n },\n )\n lookup_repo_lambda.add_to_role_policy(bedrock_agent_access_policy)\n\n lookup_repo_job = tasks.LambdaInvoke(\n self,\n \"Lookup Repo\",\n lambda_function=lookup_repo_lambda,\n output_path=\"$.Payload\",\n )\n\n # Agent #2: summarize the repo\n summarize_repo_lambda = lambda_python.PythonFunction(\n self,\n \"SummarizeRepoAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/most_popular_repo_bedrock_agent/agent\",\n handler=\"summarize_repo_readme_agent\",\n bundling=lambda_python.BundlingOptions(\n asset_excludes=[\".venv\", \".mypy_cache\", \"__pycache__\"],\n ),\n timeout=Duration.seconds(60),\n memory_size=512,\n environment={\n \"BEDROCK_AGENT_ID\": \"INSERT BEDROCK AGENT ID HERE\",\n \"BEDROCK_AGENT_ALIAS_ID\": \"INSERT BEDROCK AGENT ALIAS ID HERE\",\n },\n )\n summarize_repo_lambda.add_to_role_policy(bedrock_agent_access_policy)\n\n 
summarize_repo_job = tasks.LambdaInvoke(\n self,\n \"Summarize Repo\",\n lambda_function=summarize_repo_lambda,\n output_path=\"$.Payload\",\n )\n\n # Hook the agents together into a sequential pipeline\n chain = lookup_repo_job.next(summarize_repo_job)\n\n sfn.StateMachine(\n self,\n \"MostPopularRepoWorkflow\",\n state_machine_name=\"PromptChainDemo-MostPopularRepoBedrockAgents\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n timeout=Duration.seconds(300),\n )"
},
{
"identifier": "MostPopularRepoLangchainStack",
"path": "stacks/most_popular_repo_langchain_stack.py",
"snippet": "class MostPopularRepoLangchainStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Agent #1: look up the highest trending repo on GitHub\n lookup_repo_lambda = lambda_python.PythonFunction(\n self,\n \"LookupRepoAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/most_popular_repo_langchain\",\n handler=\"lookup_trending_repo_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=512,\n )\n lookup_repo_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n lookup_repo_job = tasks.LambdaInvoke(\n self,\n \"Lookup Repo\",\n lambda_function=lookup_repo_lambda,\n output_path=\"$.Payload\",\n )\n\n # Agent #2: summarize the repo\n summarize_repo_lambda = lambda_python.PythonFunction(\n self,\n \"SummarizeRepoAgent\",\n runtime=lambda_.Runtime.PYTHON_3_9,\n entry=\"agents/most_popular_repo_langchain\",\n handler=\"summarize_repo_readme_agent\",\n bundling=get_lambda_bundling_options(),\n timeout=Duration.seconds(60),\n memory_size=512,\n )\n summarize_repo_lambda.add_to_role_policy(get_bedrock_iam_policy_statement())\n\n summarize_repo_job = tasks.LambdaInvoke(\n self,\n \"Summarize Repo\",\n lambda_function=summarize_repo_lambda,\n output_path=\"$.Payload\",\n )\n\n # Hook the agents together into a sequential pipeline\n chain = lookup_repo_job.next(summarize_repo_job)\n\n sfn.StateMachine(\n self,\n \"MostPopularRepoWorkflow\",\n state_machine_name=\"PromptChainDemo-MostPopularRepoLangchain\",\n definition_body=sfn.DefinitionBody.from_chainable(chain),\n timeout=Duration.seconds(300),\n )"
},
{
"identifier": "AlarmsStack",
"path": "stacks/alarms_stack.py",
"snippet": "class AlarmsStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n alarms = []\n\n for name_suffix in [\n \"BlogPost\",\n \"TripPlanner\",\n \"StoryWriter\",\n \"MoviePitch\",\n \"MealPlanner\",\n \"MostPopularRepoBedrockAgents\",\n \"MostPopularRepoLangchain\",\n ]:\n workflow = sfn.StateMachine.from_state_machine_name(\n self, f\"{name_suffix}Workflow\", f\"PromptChainDemo-{name_suffix}\"\n )\n\n alarm = cloudwatch.Alarm(\n self,\n f\"{name_suffix}WorkflowFailures\",\n alarm_name=f\"PromptChainDemo-{name_suffix}-Workflow-Failures\",\n threshold=1,\n evaluation_periods=1,\n metric=workflow.metric_failed(statistic=cloudwatch.Stats.SUM),\n )\n alarms.append(alarm)\n\n composite_alarm = cloudwatch.CompositeAlarm(\n self,\n f\"CompositeAlarm\",\n composite_alarm_name=\"PromptChainDemo-Composite-Alarm\",\n alarm_rule=cloudwatch.AlarmRule.any_of(*alarms),\n )\n\n topic = sns.Topic(\n self,\n \"PromptChainDemo-Notifications\",\n topic_name=\"bedrock-serverless-prompt-chaining-notifications\",\n )\n topic.add_to_resource_policy(\n iam.PolicyStatement(\n actions=[\"SNS:Publish\"],\n principals=[\n iam.ServicePrincipal(\"codestar-notifications.amazonaws.com\")\n ],\n resources=[\n Stack.of(self).format_arn(\n service=\"sns\",\n resource=\"bedrock-serverless-prompt-chaining-notifications\",\n )\n ],\n )\n )\n composite_alarm.add_alarm_action(cw_actions.SnsAction(topic))"
}
] | from aws_cdk import (
App,
Environment,
)
from stacks.webapp_stack import WebappStack
from stacks.blog_post_stack import BlogPostStack
from stacks.trip_planner_stack import TripPlannerStack
from stacks.story_writer_stack import StoryWriterStack
from stacks.movie_pitch_stack import MoviePitchStack
from stacks.meal_planner_stack import MealPlannerStack
from stacks.most_popular_repo_bedrock_agent_stack import (
MostPopularRepoBedrockAgentStack,
)
from stacks.most_popular_repo_langchain_stack import (
MostPopularRepoLangchainStack,
)
from stacks.alarms_stack import AlarmsStack
import os | 9,871 |
app = App()
env = Environment(account=os.environ["CDK_DEFAULT_ACCOUNT"], region="us-west-2")
WebappStack(
app,
"PromptChaining-StreamlitWebapp",
env=env,
parent_domain="TODO FILL IN",
)
BlogPostStack(
app,
"PromptChaining-BlogPostDemo",
env=env,
)
TripPlannerStack(
app,
"PromptChaining-TripPlannerDemo",
env=env,
)
StoryWriterStack(
app,
"PromptChaining-StoryWriterDemo",
env=env,
)
MoviePitchStack(
app,
"PromptChaining-MoviePitchDemo",
env=env,
)
MealPlannerStack(
app,
"PromptChaining-MealPlannerDemo",
env=env,
)
MostPopularRepoBedrockAgentStack(
app,
"PromptChaining-MostPopularRepoBedrockAgentsDemo",
env=env,
)
|
app = App()
env = Environment(account=os.environ["CDK_DEFAULT_ACCOUNT"], region="us-west-2")
WebappStack(
app,
"PromptChaining-StreamlitWebapp",
env=env,
parent_domain="TODO FILL IN",
)
BlogPostStack(
app,
"PromptChaining-BlogPostDemo",
env=env,
)
TripPlannerStack(
app,
"PromptChaining-TripPlannerDemo",
env=env,
)
StoryWriterStack(
app,
"PromptChaining-StoryWriterDemo",
env=env,
)
MoviePitchStack(
app,
"PromptChaining-MoviePitchDemo",
env=env,
)
MealPlannerStack(
app,
"PromptChaining-MealPlannerDemo",
env=env,
)
MostPopularRepoBedrockAgentStack(
app,
"PromptChaining-MostPopularRepoBedrockAgentsDemo",
env=env,
) | MostPopularRepoLangchainStack( | 7 | 2023-10-26 22:17:30+00:00 | 12k |
chenran-li/RQL-release | stable_baselines3/ppo/ppo.py | [
{
"identifier": "OnPolicyAlgorithm",
"path": "stable_baselines3/common/on_policy_algorithm.py",
"snippet": "class OnPolicyAlgorithm(BaseAlgorithm):\n \"\"\"\n The base for On-Policy algorithms (ex: A2C/PPO).\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress remaining (from 1 to 0)\n :param n_steps: The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param gamma: Discount factor\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator.\n Equivalent to classic advantage when set to 1.\n :param ent_coef: Entropy coefficient for the loss calculation\n :param vf_coef: Value function coefficient for the loss calculation\n :param max_grad_norm: The maximum value for the gradient clipping\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[ActorCriticPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule],\n n_steps: int,\n gamma: float,\n gae_lambda: float,\n ent_coef: float,\n vf_coef: float,\n max_grad_norm: float,\n use_sde: bool,\n sde_sample_freq: int,\n tensorboard_log: Optional[str] = None,\n monitor_wrapper: bool = True,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n\n super().__init__(\n policy=policy,\n env=env,\n learning_rate=learning_rate,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n device=device,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n support_multi_env=True,\n seed=seed,\n tensorboard_log=tensorboard_log,\n supported_action_spaces=supported_action_spaces,\n )\n\n self.n_steps = n_steps\n self.gamma = gamma\n self.gae_lambda = gae_lambda\n self.ent_coef = ent_coef\n self.vf_coef = vf_coef\n self.max_grad_norm = max_grad_norm\n self.rollout_buffer = None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n self._setup_lr_schedule()\n self.set_random_seed(self.seed)\n\n buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, spaces.Dict) else RolloutBuffer\n\n self.rollout_buffer = buffer_cls(\n self.n_steps,\n self.observation_space,\n self.action_space,\n device=self.device,\n gamma=self.gamma,\n 
gae_lambda=self.gae_lambda,\n n_envs=self.n_envs,\n )\n self.policy = self.policy_class( # pytype:disable=not-instantiable\n self.observation_space,\n self.action_space,\n self.lr_schedule,\n use_sde=self.use_sde,\n **self.policy_kwargs # pytype:disable=not-instantiable\n )\n self.policy = self.policy.to(self.device)\n\n def collect_rollouts(\n self,\n env: VecEnv,\n callback: BaseCallback,\n rollout_buffer: RolloutBuffer,\n n_rollout_steps: int,\n ) -> bool:\n \"\"\"\n Collect experiences using the current policy and fill a ``RolloutBuffer``.\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n\n :param env: The training environment\n :param callback: Callback that will be called at each step\n (and at the beginning and end of the rollout)\n :param rollout_buffer: Buffer to fill with rollouts\n :param n_rollout_steps: Number of experiences to collect per environment\n :return: True if function returned with at least `n_rollout_steps`\n collected, False if callback terminated rollout prematurely.\n \"\"\"\n assert self._last_obs is not None, \"No previous observation was provided\"\n # Switch to eval mode (this affects batch norm / dropout)\n self.policy.set_training_mode(False)\n\n n_steps = 0\n rollout_buffer.reset()\n # Sample new weights for the state dependent exploration\n if self.use_sde:\n self.policy.reset_noise(env.num_envs)\n\n callback.on_rollout_start()\n\n while n_steps < n_rollout_steps:\n if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:\n # Sample a new noise matrix\n self.policy.reset_noise(env.num_envs)\n\n with th.no_grad():\n # Convert to pytorch tensor or to TensorDict\n obs_tensor = obs_as_tensor(self._last_obs, self.device)\n actions, values, log_probs = self.policy(obs_tensor)\n actions = actions.cpu().numpy()\n\n # Rescale and perform action\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.action_space, spaces.Box):\n clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n new_obs, rewards, dones, infos = env.step(clipped_actions)\n\n self.num_timesteps += env.num_envs\n\n # Give access to local variables\n callback.update_locals(locals())\n if callback.on_step() is False:\n return False\n\n self._update_info_buffer(infos)\n n_steps += 1\n\n if isinstance(self.action_space, spaces.Discrete):\n # Reshape in case of discrete action\n actions = actions.reshape(-1, 1)\n\n # Handle timeout by bootstraping with value function\n # see GitHub issue #633\n for idx, done in enumerate(dones):\n if (\n done\n and infos[idx].get(\"terminal_observation\") is not None\n and infos[idx].get(\"TimeLimit.truncated\", False)\n ):\n terminal_obs = self.policy.obs_to_tensor(infos[idx][\"terminal_observation\"])[0]\n with th.no_grad():\n terminal_value = self.policy.predict_values(terminal_obs)[0]\n rewards[idx] += self.gamma * terminal_value\n\n rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs)\n self._last_obs = new_obs\n self._last_episode_starts = dones\n\n with th.no_grad():\n # Compute value for the last timestep\n values = self.policy.predict_values(obs_as_tensor(new_obs, self.device))\n\n rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)\n\n callback.on_rollout_end()\n\n return True\n\n def train(self) -> None:\n \"\"\"\n Consume current rollout data and update policy parameters.\n Implemented by 
individual algorithms.\n \"\"\"\n raise NotImplementedError\n\n def learn(\n self: SelfOnPolicyAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 1,\n tb_log_name: str = \"OnPolicyAlgorithm\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfOnPolicyAlgorithm:\n iteration = 0\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n callback.on_training_start(locals(), globals())\n\n while self.num_timesteps < total_timesteps:\n\n continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)\n\n if continue_training is False:\n break\n\n iteration += 1\n self._update_current_progress_remaining(self.num_timesteps, total_timesteps)\n\n # Display training infos\n if log_interval is not None and iteration % log_interval == 0:\n time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)\n fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)\n self.logger.record(\"time/iterations\", iteration, exclude=\"tensorboard\")\n if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:\n self.logger.record(\"rollout/ep_rew_mean\", safe_mean([ep_info[\"r\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"rollout/ep_len_mean\", safe_mean([ep_info[\"l\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"time/fps\", fps)\n self.logger.record(\"time/time_elapsed\", int(time_elapsed), exclude=\"tensorboard\")\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(step=self.num_timesteps)\n\n self.train()\n\n callback.on_training_end()\n\n return self\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"policy.optimizer\"]\n\n return state_dicts, []"
},
{
"identifier": "ActorCriticCnnPolicy",
"path": "stable_baselines3/common/policies.py",
"snippet": "class ActorCriticCnnPolicy(ActorCriticPolicy):\n \"\"\"\n CNN policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super().__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n share_features_extractor,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )"
},
{
"identifier": "ActorCriticPolicy",
"path": "stable_baselines3/common/policies.py",
"snippet": "class ActorCriticPolicy(BasePolicy):\n \"\"\"\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n # TODO(antonin): update type annotation when we remove shared network support\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n # Small values to avoid NaN in Adam optimizer\n if optimizer_class == th.optim.Adam:\n optimizer_kwargs[\"eps\"] = 1e-5\n\n super().__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n squash_output=squash_output,\n normalize_images=normalize_images,\n )\n\n # Convert [dict()] to dict() as shared network are deprecated\n if isinstance(net_arch, list) and len(net_arch) > 0:\n if isinstance(net_arch[0], dict):\n warnings.warn(\n (\n \"As shared layers in the mlp_extractor are deprecated and will be removed in SB3 v1.8.0, \"\n \"you should now pass directly a dictionary and not a list \"\n \"(net_arch=dict(pi=..., vf=...) 
instead of net_arch=[dict(pi=..., vf=...)])\"\n ),\n )\n net_arch = net_arch[0]\n else:\n # Note: deprecation warning will be emitted\n # by the MlpExtractor constructor\n pass\n\n # Default network architecture, from stable-baselines\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n net_arch = dict(pi=[64, 64], vf=[64, 64])\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.ortho_init = ortho_init\n\n self.share_features_extractor = share_features_extractor\n self.features_extractor = self.make_features_extractor()\n self.features_dim = self.features_extractor.features_dim\n if self.share_features_extractor:\n self.pi_features_extractor = self.features_extractor\n self.vf_features_extractor = self.features_extractor\n else:\n self.pi_features_extractor = self.features_extractor\n self.vf_features_extractor = self.make_features_extractor()\n # if the features extractor is not shared, there cannot be shared layers in the mlp_extractor\n # TODO(antonin): update the check once we change net_arch behavior\n if isinstance(net_arch, list) and len(net_arch) > 0:\n raise ValueError(\n \"Error: if the features extractor is not shared, there cannot be shared layers in the mlp_extractor\"\n )\n\n self.log_std_init = log_std_init\n dist_kwargs = None\n # Keyword arguments for gSDE distribution\n if use_sde:\n dist_kwargs = {\n \"full_std\": full_std,\n \"squash_output\": squash_output,\n \"use_expln\": use_expln,\n \"learn_features\": False,\n }\n\n self.use_sde = use_sde\n self.dist_kwargs = dist_kwargs\n\n # Action distribution\n self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)\n\n self._build(lr_schedule)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)\n\n data.update(\n dict(\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n use_sde=self.use_sde,\n log_std_init=self.log_std_init,\n squash_output=default_none_kwargs[\"squash_output\"],\n full_std=default_none_kwargs[\"full_std\"],\n use_expln=default_none_kwargs[\"use_expln\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n ortho_init=self.ortho_init,\n optimizer_class=self.optimizer_class,\n optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n def reset_noise(self, n_envs: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix.\n\n :param n_envs:\n \"\"\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), \"reset_noise() is only available when using gSDE\"\n self.action_dist.sample_weights(self.log_std, batch_size=n_envs)\n\n def _build_mlp_extractor(self) -> None:\n \"\"\"\n Create the policy and value networks.\n Part of the layers can be shared.\n \"\"\"\n # Note: If net_arch is None and some features extractor is used,\n # net_arch here is an empty list and mlp_extractor does not\n # really contain any layers (acts like an identity module).\n self.mlp_extractor = MlpExtractor(\n self.features_dim,\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n device=self.device,\n )\n\n def _build(self, lr_schedule: Schedule) -> None:\n \"\"\"\n Create the networks and the optimizer.\n\n :param lr_schedule: Learning rate schedule\n lr_schedule(1) is the 
initial learning rate\n \"\"\"\n self._build_mlp_extractor()\n\n latent_dim_pi = self.mlp_extractor.latent_dim_pi\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):\n self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n else:\n raise NotImplementedError(f\"Unsupported distribution '{self.action_dist}'.\")\n\n self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)\n # Init weights: use orthogonal initialization\n # with small initial weight for the output\n if self.ortho_init:\n # TODO: check for features_extractor\n # Values from stable-baselines.\n # features_extractor/mlp values are\n # originally from openai/baselines (default gains/init_scales).\n module_gains = {\n self.features_extractor: np.sqrt(2),\n self.mlp_extractor: np.sqrt(2),\n self.action_net: 0.01,\n self.value_net: 1,\n }\n if not self.share_features_extractor:\n # Note(antonin): this is to keep SB3 results\n # consistent, see GH#1148\n del module_gains[self.features_extractor]\n module_gains[self.pi_features_extractor] = np.sqrt(2)\n module_gains[self.vf_features_extractor] = np.sqrt(2)\n\n for module, gain in module_gains.items():\n module.apply(partial(self.init_weights, gain=gain))\n\n # Setup optimizer with initial learning rate\n self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Forward pass in all the networks (actor and critic)\n\n :param obs: Observation\n :param deterministic: Whether to sample or use deterministic actions\n :return: action, value and log probability of the action\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n if self.share_features_extractor:\n latent_pi, latent_vf = self.mlp_extractor(features)\n else:\n pi_features, vf_features = features\n latent_pi = self.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.mlp_extractor.forward_critic(vf_features)\n # Evaluate the values for the given observations\n values = self.value_net(latent_vf)\n distribution = self._get_action_dist_from_latent(latent_pi)\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n actions = actions.reshape((-1,) + self.action_space.shape)\n return actions, values, log_prob\n\n def extract_features(self, obs: th.Tensor) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:\n \"\"\"\n Preprocess the observation if needed and extract features.\n\n :param obs: Observation\n :return: the output of the features extractor(s)\n \"\"\"\n if self.share_features_extractor:\n return super().extract_features(obs, self.features_extractor)\n else:\n pi_features = super().extract_features(obs, self.pi_features_extractor)\n vf_features = super().extract_features(obs, self.vf_features_extractor)\n return pi_features, vf_features\n\n def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:\n \"\"\"\n Retrieve 
action distribution given the latent codes.\n\n :param latent_pi: Latent code for the actor\n :return: Action distribution\n \"\"\"\n mean_actions = self.action_net(latent_pi)\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std)\n elif isinstance(self.action_dist, CategoricalDistribution):\n # Here mean_actions are the logits before the softmax\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, MultiCategoricalDistribution):\n # Here mean_actions are the flattened logits\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, BernoulliDistribution):\n # Here mean_actions are the logits (before rounding to get the binary actions)\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)\n else:\n raise ValueError(\"Invalid action distribution\")\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n return self.get_distribution(observation).get_actions(deterministic=deterministic)\n\n def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Optional[th.Tensor]]:\n \"\"\"\n Evaluate actions according to the current policy,\n given the observations.\n\n :param obs: Observation\n :param actions: Actions\n :return: estimated value, log likelihood of taking those actions\n and entropy of the action distribution.\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n if self.share_features_extractor:\n latent_pi, latent_vf = self.mlp_extractor(features)\n else:\n pi_features, vf_features = features\n latent_pi = self.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.mlp_extractor.forward_critic(vf_features)\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n entropy = distribution.entropy()\n return values, log_prob, entropy\n\n def get_distribution(self, obs: th.Tensor) -> Distribution:\n \"\"\"\n Get the current policy distribution given the observations.\n\n :param obs:\n :return: the action distribution.\n \"\"\"\n features = super().extract_features(obs, self.pi_features_extractor)\n latent_pi = self.mlp_extractor.forward_actor(features)\n return self._get_action_dist_from_latent(latent_pi)\n\n def predict_values(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the estimated values according to the current policy given the observations.\n\n :param obs: Observation\n :return: the estimated values.\n \"\"\"\n features = super().extract_features(obs, self.vf_features_extractor)\n latent_vf = self.mlp_extractor.forward_critic(features)\n return self.value_net(latent_vf)"
},
{
"identifier": "BasePolicy",
"path": "stable_baselines3/common/policies.py",
"snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))"
},
{
"identifier": "MultiInputActorCriticPolicy",
"path": "stable_baselines3/common/policies.py",
"snippet": "class MultiInputActorCriticPolicy(ActorCriticPolicy):\n \"\"\"\n MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space (Tuple)\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Uses the CombinedExtractor\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Union[List[int], Dict[str, List[int]], List[Dict[str, List[int]]], None] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n share_features_extractor: bool = True,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super().__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n share_features_extractor,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )"
},
{
"identifier": "GymEnv",
"path": "stable_baselines3/common/type_aliases.py",
"snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:"
},
{
"identifier": "explained_variance",
"path": "stable_baselines3/common/utils.py",
"snippet": "def explained_variance(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes fraction of variance that ypred explains about y.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n :param y_pred: the prediction\n :param y_true: the expected value\n :return: explained variance of ypred and y\n \"\"\"\n assert y_true.ndim == 1 and y_pred.ndim == 1\n var_y = np.var(y_true)\n return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y"
},
{
"identifier": "get_schedule_fn",
"path": "stable_baselines3/common/utils.py",
"snippet": "def get_schedule_fn(value_schedule: Union[Schedule, float, int]) -> Schedule:\n \"\"\"\n Transform (if needed) learning rate and clip range (for PPO)\n to callable.\n\n :param value_schedule: Constant value of schedule function\n :return: Schedule function (can return constant value)\n \"\"\"\n # If the passed schedule is a float\n # create a constant function\n if isinstance(value_schedule, (float, int)):\n # Cast to float to avoid errors\n value_schedule = constant_fn(float(value_schedule))\n else:\n assert callable(value_schedule)\n return value_schedule"
}
] | import warnings
import numpy as np
import torch as th
from typing import Any, Dict, Optional, Type, TypeVar, Union
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn | 10,399 |
SelfPPO = TypeVar("SelfPPO", bound="PPO")
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
        (i.e. rollout buffer size is n_steps * n_envs where n_envs is the number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
    :param n_epochs: Number of epochs when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
    :param normalize_advantage: Whether or not to normalize the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
        because the clipping is not enough to prevent large updates
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
debug messages
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
|
SelfPPO = TypeVar("SelfPPO", bound="PPO")
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
        (i.e. rollout buffer size is n_steps * n_envs where n_envs is the number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
    :param n_epochs: Number of epochs when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
    :param normalize_advantage: Whether or not to normalize the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
        because the clipping is not enough to prevent large updates
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
debug messages
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
| policy_aliases: Dict[str, Type[BasePolicy]] = { | 3 | 2023-10-28 01:09:21+00:00 | 12k |
AmgdGocha/DriveFS-Sleuth | drivefs_sleuth/setup.py | [
{
"identifier": "get_last_pid",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_last_pid(drivefs_path):\n try:\n with open(os.path.join(drivefs_path, 'pid.txt')) as pid_file:\n return pid_file.read()\n except OSError:\n return -1"
},
{
"identifier": "get_item_info",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_item_info(profile_path, stable_id):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(f\"SELECT is_folder, stable_id, id, local_title, mime_type, is_owner, file_size, \"\n f\"modified_date, viewed_by_me_date, trashed, proto FROM items WHERE stable_id={stable_id}\")\n return cursor.fetchone()\n except sqlite3.OperationalError:\n return ()"
},
{
"identifier": "get_last_sync",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_last_sync(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"experiments.db\")) as experiments_db:\n cursor = experiments_db.cursor()\n cursor.execute(\"SELECT value FROM PhenotypeValues WHERE key='last_sync'\")\n return int(cursor.fetchone()[0])\n except sqlite3.OperationalError:\n return -1"
},
{
"identifier": "parse_protobuf",
"path": "drivefs_sleuth/utils.py",
"snippet": "def parse_protobuf(protobuf):\n if not protobuf:\n return {}\n\n with contextlib.redirect_stdout(None):\n protodeep_schema = guess_schema(data=protobuf)\n return protodeep_schema.values"
},
{
"identifier": "get_max_root_ids",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_max_root_ids(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"root_preference_sqlite.db\")) as root_preference_db:\n cursor = root_preference_db.cursor()\n cursor.execute(\"SELECT value FROM max_ids WHERE id_type='max_root_id'\")\n max_root_ids = cursor.fetchone()\n if max_root_ids:\n return int(max_root_ids[0])\n return None\n except sqlite3.OperationalError:\n return None"
},
{
"identifier": "get_deleted_items",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_deleted_items(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT stable_id, proto FROM deleted_items\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []"
},
{
"identifier": "get_mirrored_items",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_mirrored_items(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"mirror_sqlite.db\")) as mirror_sqlite_db:\n cursor = mirror_sqlite_db.cursor()\n cursor.execute(\"SELECT local_stable_id, stable_id, volume, parent_local_stable_id, local_filename, \"\n \"cloud_filename, local_mtime_ms, cloud_mtime_ms, local_md5_checksum, cloud_md5_checksum,\"\n \"local_size, cloud_size, local_version, cloud_version, shared, read_only, is_root \"\n \"FROM mirror_item\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []"
},
{
"identifier": "get_item_properties",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_item_properties(profile_path, item_id):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(f\"SELECT key, value FROM item_properties WHERE item_stable_id={item_id}\")\n item_properties = {}\n for item_property in cursor.fetchall():\n item_properties[item_property[0]] = item_property[1]\n return item_properties\n except sqlite3.OperationalError:\n return {}"
},
{
"identifier": "get_target_stable_id",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_target_stable_id(profile_path, shortcut_stable_id):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(f\"SELECT target_stable_id FROM shortcut_details \"\n f\"WHERE shortcut_stable_id={shortcut_stable_id}\")\n shortcut_stable_id = cursor.fetchone()\n if shortcut_stable_id:\n return int(shortcut_stable_id[0])\n return 0\n except sqlite3.OperationalError:\n return 0"
},
{
"identifier": "get_connected_devices",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_connected_devices(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"root_preference_sqlite.db\")) as root_preference_db:\n cursor = root_preference_db.cursor()\n cursor.execute(\"SELECT media_id, name, last_mount_point, capacity, ignored FROM media\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []"
},
{
"identifier": "get_parent_relationships",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_parent_relationships(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\n \"SELECT parent_stable_id, item_stable_id FROM stable_parents ORDER BY parent_stable_id, item_stable_id\"\n )\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []"
},
{
"identifier": "get_content_caches_paths",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_content_caches_paths(content_cache_dir):\n content_caches_paths = {}\n\n for root, _, content_caches in os.walk(content_cache_dir):\n for content_cache in content_caches:\n content_caches_paths[content_cache] = os.path.abspath(os.path.join(root, content_cache))\n del(content_caches_paths['chunks.db'])\n\n return content_caches_paths"
},
{
"identifier": "get_file_content_cache_path",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_file_content_cache_path(content_entry, content_caches_paths):\n if content_entry:\n parsed_content_entry = parse_protobuf(content_entry)\n content_entry_filename = str(parsed_content_entry['1'])\n return content_caches_paths.get(content_entry_filename, '')\n return ''"
},
{
"identifier": "get_shared_with_me_without_link",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_shared_with_me_without_link(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT is_folder, stable_id, id, local_title, mime_type, is_owner, file_size, modified_date\"\n \", viewed_by_me_date, trashed, proto FROM items \"\n \"LEFT JOIN stable_parents ON items.stable_id = stable_parents.item_stable_id \"\n \"LEFT JOIN shortcut_details ON items.stable_id = shortcut_details.target_stable_id \"\n \"WHERE items.is_owner=0 AND items.shared_with_me_date=1 \"\n \"AND stable_parents.item_stable_id IS NULL \"\n \"AND shortcut_details.target_stable_id IS NULL \"\n \"ORDER BY items.stable_id\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []"
},
{
"identifier": "get_mirroring_roots_for_account",
"path": "drivefs_sleuth/utils.py",
"snippet": "def get_mirroring_roots_for_account(drivefs_path, account_id):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"root_preference_sqlite.db\")) as root_preference_db:\n cursor = root_preference_db.cursor()\n cursor.execute(\"SELECT account_token, root_id, media_id, title, root_path, sync_type, destination, \"\n f\"last_seen_absolute_path FROM roots WHERE account_token=\\\"{account_id}\\\"\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []"
},
{
"identifier": "File",
"path": "drivefs_sleuth/synced_files_tree.py",
"snippet": "class File(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, content_cache_path, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n\n self.__content_cache_path = content_cache_path\n self.__file_type = parse_protobuf(proto).get('45', '')\n\n def get_content_cache_path(self):\n return self.__content_cache_path\n\n def get_file_type(self):\n return self.__file_type"
},
{
"identifier": "Link",
"path": "drivefs_sleuth/synced_files_tree.py",
"snippet": "class Link(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, target_item, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n self.__target_item = target_item\n\n def get_target_item(self):\n return self.__target_item"
},
{
"identifier": "Directory",
"path": "drivefs_sleuth/synced_files_tree.py",
"snippet": "class Directory(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n self.__sub_items = []\n\n def add_item(self, item):\n self.__sub_items.append(item)\n\n def remove_item(self, stable_id):\n for item in self.__sub_items:\n if item.get_stable_id() == stable_id:\n self.__sub_items.remove(item)\n\n def get_sub_items(self):\n return self.__sub_items"
},
{
"identifier": "DummyItem",
"path": "drivefs_sleuth/synced_files_tree.py",
"snippet": "class DummyItem(Item):\n def __init__(self, stable_id):\n super().__init__(stable_id, '', 'DELETED_ITEM', '', '', '', '', '', '', '', '', '')\n\n def get_sub_items(self):\n return []"
},
{
"identifier": "MirrorItem",
"path": "drivefs_sleuth/synced_files_tree.py",
"snippet": "class MirrorItem:\n def __init__(self, local_stable_id, stable_id, volume, parent, local_filename, cloud_filename, local_mtime,\n cloud_mtime, local_md5, cloud_md5, local_size, cloud_size, local_version, cloud_version, shared,\n read_only, is_root):\n self.local_stable_id = local_stable_id\n self.stable_id = stable_id\n self.volume = volume\n self.parent = parent\n self.local_filename = local_filename\n self.cloud_filename = cloud_filename\n self.local_mtime = local_mtime\n self.cloud_mtime = cloud_mtime\n self.local_md5 = local_md5\n self.cloud_md5 = cloud_md5\n self.local_size = local_size\n self.cloud_size = cloud_size\n self.local_version = local_version\n self.cloud_version = cloud_version\n self.shared = shared\n self.read_only = read_only\n self.is_root = is_root\n\n def get_local_mtime_utc(self):\n return datetime.datetime.fromtimestamp(int(self.local_mtime)/1000.0, datetime.timezone.utc)\n\n def get_cloud_mtime_utc(self):\n return datetime.datetime.fromtimestamp(int(self.cloud_mtime)/1000.0, datetime.timezone.utc)"
},
{
"identifier": "SyncedFilesTree",
"path": "drivefs_sleuth/synced_files_tree.py",
"snippet": "class SyncedFilesTree:\n def __init__(self, root):\n self.__root = root\n self.__orphan_items = []\n self.__shared_with_me = []\n self.__recovered_deleted_items = []\n self.__deleted_items = []\n self.__mirror_items = []\n self.__recoverable_items_from_cache = []\n\n def get_root(self):\n return self.__root\n\n def get_orphan_items(self):\n return self.__orphan_items\n\n def add_orphan_item(self, item):\n self.__orphan_items.append(item)\n\n def add_deleted_item(self, stable_id):\n self.__deleted_items.append(stable_id)\n\n def add_recovered_deleted_item(self, item):\n self.__recovered_deleted_items.append(item)\n\n def add_shared_with_me_item(self, item):\n self.__shared_with_me.append(item)\n\n def get_shared_with_me_items(self):\n return self.__shared_with_me\n\n def get_deleted_items(self):\n return self.__deleted_items\n\n def get_recovered_deleted_items(self):\n return self.__recovered_deleted_items\n\n def get_item_by_id(self, target_id, is_owner=False):\n if not is_owner:\n queue = [self.get_root()] + self.get_orphan_items() + self.get_shared_with_me_items()\n else:\n queue = [self.get_root()]\n\n while queue:\n current_item = queue.pop(0)\n\n if current_item.get_stable_id() == target_id:\n return current_item\n\n if current_item.is_file():\n continue\n\n elif current_item.is_dir():\n queue += current_item.get_sub_items()\n\n elif current_item.is_link():\n queue += current_item.get_target_item()\n\n return None\n\n def search_item_by_name(self, filenames=None, regex=None, contains=True, list_sub_items=True):\n if filenames is None:\n filenames = []\n if regex is None:\n regex = []\n items = []\n\n def append_item_childs(item):\n items.append(item)\n if isinstance(item, File):\n return\n\n elif isinstance(item, Link):\n target = item.get_target_item()\n if isinstance(item, File):\n append_item_childs(target)\n else:\n for sub_item in target.get_sub_items():\n append_item_childs(sub_item)\n\n elif isinstance(item, Directory):\n for sub_item in item.get_sub_items():\n append_item_childs(sub_item)\n\n else:\n for sub_item in item:\n append_item_childs(sub_item)\n\n def search(current_item):\n hit = False\n if regex:\n for exp in regex:\n match = re.search(exp, current_item.local_title)\n if match:\n items.append(current_item)\n hit = True\n\n if contains:\n for filename in filenames:\n if filename.lower() in current_item.local_title.lower():\n items.append(current_item)\n hit = True\n else:\n for filename in filenames:\n if filename.lower() == current_item.local_title.lower():\n items.append(current_item)\n hit = True\n\n if isinstance(current_item, File):\n return\n\n elif isinstance(current_item, Link) and hit and list_sub_items:\n target = current_item.get_target_item()\n if isinstance(target, File):\n append_item_childs(target)\n else:\n for sub_item in target.get_sub_items():\n append_item_childs(sub_item)\n\n elif isinstance(current_item, Directory) and hit and list_sub_items:\n for sub_item in current_item.get_sub_items():\n append_item_childs(sub_item)\n\n else:\n if isinstance(current_item, Link):\n target = current_item.get_target_item()\n if isinstance(target, File):\n search(target)\n else:\n for sub_item in target.get_sub_items():\n search(sub_item)\n else:\n for sub_item in current_item.get_sub_items():\n search(sub_item)\n\n search(self.get_root())\n for orphan_item in self.get_orphan_items():\n search(orphan_item)\n\n for shared_item in self.get_shared_with_me_items():\n search(shared_item)\n\n for recovered_deleted_item in 
self.get_recovered_deleted_items():\n search(recovered_deleted_item)\n\n return items\n\n def add_mirrored_item(self, mirrored_item):\n self.__mirror_items.append(mirrored_item)\n\n def get_mirrored_items(self):\n return self.__mirror_items\n\n def add_recoverable_item_from_cache(self, recoverable_from_cache_item):\n self.__recoverable_items_from_cache.append(recoverable_from_cache_item)\n\n def get_recoverable_items_from_cache(self):\n return self.__recoverable_items_from_cache\n\n def print_synced_files_tree(self):\n print('\\n----------Synced Items----------\\n')\n\n _print_tree([self.get_root()] + self.get_orphan_items())\n\n print('\\n----------Deleted Items----------\\n')\n\n for recovered_deleted_items in self.__recovered_deleted_items:\n print(f'- ({recovered_deleted_items.get_stable_id()}) {recovered_deleted_items.local_title}')\n\n for deleted_item in self.__deleted_items:\n print(f'- {deleted_item}')\n\n print('\\n----------Orphan Items----------\\n')\n\n for orphan in self.get_orphan_items():\n print(f'- ({orphan.get_stable_id()}) {orphan.local_title}')\n\n print('\\n----------Shared With Me Items----------\\n')\n\n for shared_with_me_item in self.get_shared_with_me_items():\n print(f'- ({shared_with_me_item.get_stable_id()}) {shared_with_me_item.local_title}')"
},
{
"identifier": "get_accounts",
"path": "drivefs_sleuth/tasks.py",
"snippet": "def get_accounts(drivefs_path):\n accounts = {}\n experiments_ids = get_experiment_account_ids(drivefs_path)\n profiles = get_available_profiles(drivefs_path)\n available_accounts = set(experiments_ids + profiles)\n for account_id in available_accounts:\n accounts[account_id] = {\n 'email': lookup_account_id(drivefs_path, account_id)\n }\n logged_in = account_id in profiles\n accounts[account_id]['logged_in'] = logged_in\n accounts[account_id]['properties'] = get_account_properties(os.path.join(drivefs_path, account_id))\n return accounts"
}
] | import os.path
import datetime
from enum import Enum
from collections import OrderedDict
from drivefs_sleuth.utils import get_last_pid
from drivefs_sleuth.utils import get_item_info
from drivefs_sleuth.utils import get_last_sync
from drivefs_sleuth.utils import parse_protobuf
from drivefs_sleuth.utils import get_max_root_ids
from drivefs_sleuth.utils import get_deleted_items
from drivefs_sleuth.utils import get_mirrored_items
from drivefs_sleuth.utils import get_item_properties
from drivefs_sleuth.utils import get_target_stable_id
from drivefs_sleuth.utils import get_connected_devices
from drivefs_sleuth.utils import get_parent_relationships
from drivefs_sleuth.utils import get_content_caches_paths
from drivefs_sleuth.utils import get_file_content_cache_path
from drivefs_sleuth.utils import get_shared_with_me_without_link
from drivefs_sleuth.utils import get_mirroring_roots_for_account
from drivefs_sleuth.synced_files_tree import File
from drivefs_sleuth.synced_files_tree import Link
from drivefs_sleuth.synced_files_tree import Directory
from drivefs_sleuth.synced_files_tree import DummyItem
from drivefs_sleuth.synced_files_tree import MirrorItem
from drivefs_sleuth.synced_files_tree import SyncedFilesTree
from drivefs_sleuth.tasks import get_accounts | 7,644 | child = orphan_dirs.get(child_id, None)
if child:
child.tree_path = f'{current_parent_dir.tree_path}\\{child.local_title}'
del orphan_dirs[child_id]
else:
child = Directory(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5],
child_info[6], child_info[7], child_info[8], child_info[9],
child_properties,
f'{current_parent_dir.tree_path}\\{child_info[3]}', child_info[10])
added_dirs[child_id] = child
current_parent_dir.add_item(child)
# TODO: check if I can add a link in the shared with me
for shared_with_me_item_info in get_shared_with_me_without_link(self.__profile_path):
shared_with_me_item_properties = get_item_properties(self.__profile_path, shared_with_me_item_info[1])
if shared_with_me_item_info[0] == 0:
content_cache_path = get_file_content_cache_path(
shared_with_me_item_properties.get('content-entry', None), content_caches_paths)
shared_with_me_file = File(shared_with_me_item_info[1], shared_with_me_item_info[2],
shared_with_me_item_info[3], shared_with_me_item_info[4],
shared_with_me_item_info[5], shared_with_me_item_info[6],
shared_with_me_item_info[7], shared_with_me_item_info[8],
shared_with_me_item_info[9], shared_with_me_item_properties,
f'Shared with me\\{shared_with_me_item_info[3]}', content_cache_path,
shared_with_me_item_info[10])
self.__synced_files_tree.add_shared_with_me_item(shared_with_me_file)
if shared_with_me_file:
self.__synced_files_tree.add_recoverable_item_from_cache(shared_with_me_file)
else:
shared_with_me_item = orphan_dirs.get(shared_with_me_item_info[1], None)
if shared_with_me_item:
del orphan_dirs[shared_with_me_item_info[1]]
else:
shared_with_me_item = Directory(shared_with_me_item_info[1], shared_with_me_item_info[2],
shared_with_me_item_info[3], shared_with_me_item_info[4],
shared_with_me_item_info[5], shared_with_me_item_info[6],
shared_with_me_item_info[7], shared_with_me_item_info[8],
shared_with_me_item_info[9], shared_with_me_item_properties,
f'{current_parent_dir.tree_path}\\{shared_with_me_item_info[3]}',
shared_with_me_item_info[10])
self.__synced_files_tree.add_shared_with_me_item(shared_with_me_item)
for orphan_id, orphan_dir in orphan_dirs.items():
self.__synced_files_tree.add_orphan_item(orphan_dir)
mirrored_items = get_mirrored_items(self.__profile_path)
for item in mirrored_items:
self.__synced_files_tree.add_mirrored_item(
MirrorItem(item[0], item[1], item[2], item[3], item[4], item[5], item[6], item[7], item[8], item[9],
item[10], item[11], item[12], item[13], item[14], item[15], item[16]
)
)
for deleted_item in get_deleted_items(self.__profile_path):
parsed_buf = parse_protobuf(deleted_item[1])
properties = {}
for index, props in parsed_buf.items():
if index == '55' or index.startswith('55-'):
for prop in props:
if isinstance(prop, dict):
properties[prop['1']] = prop[[key for key in prop.keys() if key != '1'][0]]
elif isinstance(prop, list):
for p in prop:
properties[p['1']] = p[[key for key in p.keys() if key != '1'][0]]
if parsed_buf['4'] == 'application/vnd.google-apps.folder':
self.__synced_files_tree.add_recovered_deleted_item(
Directory(deleted_item[0], parsed_buf.get('1', ''), parsed_buf.get('3', ''),
parsed_buf.get('4', ''), parsed_buf.get('63', 0), parsed_buf.get('14', 0),
parsed_buf.get('11', 0), parsed_buf.get('13', 0), parsed_buf.get('7', 1),
properties, parsed_buf.get('3', ''), deleted_item[1])
)
elif parsed_buf['4'] == 'application/vnd.google-apps.shortcut':
target_item = None
target_info = parsed_buf.get('132', None)
if target_info:
target_item = self.__synced_files_tree.get_item_by_id(target_info['2'])
self.__synced_files_tree.add_recovered_deleted_item(
Link(deleted_item[0], parsed_buf.get('1', ''), parsed_buf.get('3', ''), parsed_buf.get('4', ''),
parsed_buf.get('63', 0), parsed_buf.get('14', 0), parsed_buf.get('11', 0),
parsed_buf.get('13', 0), parsed_buf.get('7', 1), properties, parsed_buf.get('3', ''),
target_item, deleted_item[1])
)
else:
content_cache_path = get_file_content_cache_path(
properties.get('content-entry', None), content_caches_paths)
recovered_file = File(deleted_item[0], parsed_buf.get('1', ''), parsed_buf.get('3', ''),
parsed_buf.get('4', ''), parsed_buf.get('63', 0), parsed_buf.get('14', 0),
parsed_buf.get('11', 0), parsed_buf.get('13', 0), parsed_buf.get('7', 1),
properties, parsed_buf.get('3', ''), content_cache_path, deleted_item[1])
self.__synced_files_tree.add_recovered_deleted_item(recovered_file)
if content_cache_path:
self.__synced_files_tree.add_recoverable_item_from_cache(recovered_file)
class Setup:
def __init__(self, drivefs_path, accounts=None):
self.__drivefs_path = drivefs_path
self.__last_sync_date = datetime.datetime.fromtimestamp(get_last_sync(drivefs_path), datetime.timezone.utc)
self.__max_root_ids = get_max_root_ids(drivefs_path)
self.__last_pid = get_last_pid(drivefs_path)
self.__connected_devices = []
for connected_device in get_connected_devices(drivefs_path):
device = {
"media_id": connected_device[0],
"name": connected_device[1],
"last_mount_point": connected_device[2],
"ignore": connected_device[4],
}
if int(connected_device[3]) == -1:
device["capacity"] = connected_device[3]
else:
device["capacity"] = round(int(connected_device[3]) / 1e+9, 2)
self.__connected_devices.append(device)
if not accounts:
accounts = []
self.__accounts = []
|
class StorageDestinations(Enum):
DRIVE = "DRIVE"
PHOTOS = "PHOTOS"
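# StorageDestinations labels where a mirroring root syncs to: Account.__init__
# below maps a destination flag of 1 to DRIVE and any other value to PHOTOS.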
class Account:
def __init__(self, drivefs_path, account_id, email, is_logged_in, mirroring_roots, properties):
self.__profile_path = os.path.join(drivefs_path, account_id)
self.__account_id = account_id
self.__account_email = email
self.__is_logged_in = is_logged_in
self.__synced_files_tree = None
if is_logged_in:
self._construct_synced_files_trees()
self.__mirroring_roots = []
for mirroring_root in mirroring_roots:
mirroring_root_info = {
'root_id': mirroring_root[1],
'media_id': mirroring_root[2],
'title': mirroring_root[3],
'root_path': mirroring_root[4],
'sync_type': mirroring_root[5],
'last_seen_absolute_path': mirroring_root[7],
}
if mirroring_root[6] == 1:
mirroring_root_info['destination'] = StorageDestinations.DRIVE.value
else:
mirroring_root_info['destination'] = StorageDestinations.PHOTOS.value
self.__mirroring_roots.append(mirroring_root_info)
self.__name = properties['name']
self.__photo_url = properties['photo_url']
def get_profile_path(self):
return self.__profile_path
def get_account_id(self):
return self.__account_id
def get_account_email(self):
return self.__account_email
def is_logged_in(self):
return self.__is_logged_in
def get_synced_files_tree(self):
return self.__synced_files_tree
def get_mirroring_roots(self):
return self.__mirroring_roots
def get_name(self):
return self.__name
def get_photo_url(self):
return self.__photo_url
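    # Builds this account's SyncedFilesTree from the DriveFS metadata: it walks
    # the parent/child relationships into Directory, File and Link items, keeps
    # unresolved parents as orphan directories, and also collects shared-with-me
    # items, mirrored items, recovered deleted items and files recoverable from
    # the content cache.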
def _construct_synced_files_trees(self):
parent_relationships = get_parent_relationships(self.__profile_path)
root_info = get_item_info(self.__profile_path, parent_relationships[0][0])
root = Directory(root_info[1], root_info[2], root_info[3], root_info[4], root_info[5], root_info[6],
root_info[7], root_info[8], root_info[9],
get_item_properties(self.__profile_path, root_info[1]), root_info[3], root_info[10])
self.__synced_files_tree = SyncedFilesTree(root)
content_caches_paths = get_content_caches_paths(os.path.join(self.__profile_path, 'content_cache'))
parent_relationships_dict = OrderedDict()
for parent, child in parent_relationships:
if parent not in parent_relationships_dict.keys():
parent_relationships_dict[parent] = []
parent_relationships_dict[parent].append(child)
added_dirs = {self.__synced_files_tree.get_root().get_stable_id(): self.__synced_files_tree.get_root()}
orphan_dirs = {}
current_parent_dir = self.__synced_files_tree.get_root()
for parent_id, childs_ids in parent_relationships_dict.items():
if parent_id != current_parent_dir.get_stable_id():
if parent_id in added_dirs:
current_parent_dir = added_dirs[parent_id]
elif parent_id in orphan_dirs:
current_parent_dir = orphan_dirs[parent_id]
else:
parent_info = get_item_info(self.__profile_path, parent_id)
if not parent_info:
self.__synced_files_tree.add_deleted_item(DummyItem(parent_id))
else:
current_parent_dir = Directory(parent_info[1], parent_info[2], parent_info[3], parent_info[4],
parent_info[5], parent_info[6], parent_info[7], parent_info[8],
parent_info[9], get_item_properties(self.__profile_path,
parent_id), parent_info[3],
parent_info[10])
orphan_dirs[parent_id] = current_parent_dir
for child_id in childs_ids:
child_info = get_item_info(self.__profile_path, child_id)
child_properties = get_item_properties(self.__profile_path, child_id)
if not child_info:
self.__synced_files_tree.add_deleted_item(DummyItem(child_id))
continue
if child_info[0] == 0:
content_cache_path = get_file_content_cache_path(
child_properties.get('content-entry', None), content_caches_paths)
child_file = File(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5],
child_info[6], child_info[7], child_info[8], child_info[9], child_properties,
f'{current_parent_dir.tree_path}\\{child_info[3]}', content_cache_path,
child_info[10])
current_parent_dir.add_item(child_file)
if content_cache_path:
self.__synced_files_tree.add_recoverable_item_from_cache(child_file)
else:
if child_info[4] == 'application/vnd.google-apps.shortcut':
target_stable_id = get_target_stable_id(self.__profile_path, child_info[1])
if target_stable_id:
target = orphan_dirs.get(target_stable_id, None)
if target:
added_dirs[target_stable_id] = target
del orphan_dirs[target_stable_id]
else:
target_info = get_item_info(self.__profile_path, target_stable_id)
if target_info:
if target_info[0] == 0:
content_cache_path = get_file_content_cache_path(
child_properties.get('content-entry', None), content_caches_paths)
target = File(target_info[1], target_info[2], target_info[3], target_info[4],
target_info[5], target_info[6], target_info[7], target_info[8],
target_info[9],
get_item_properties(self.__profile_path, target_info[1]),
f'{current_parent_dir.tree_path}\\{target_info[3]}',
content_cache_path, target_info[10])
else:
target = Directory(target_info[1], target_info[2], target_info[3],
target_info[4], target_info[5], target_info[6],
target_info[7], target_info[8], target_info[9],
get_item_properties(self.__profile_path, target_info[1]),
f'{current_parent_dir.tree_path}\\{target_info[3]}',
target_info[10])
added_dirs[target_stable_id] = target
else:
target = DummyItem(target_stable_id)
self.__synced_files_tree.add_deleted_item(target)
child = Link(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5],
child_info[6], child_info[7], child_info[8], child_info[9], child_properties,
f'{current_parent_dir.tree_path}\\{child_info[3]}', target, child_info[10])
else:
target = DummyItem('-1')
child = Link(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5],
child_info[6], child_info[7], child_info[8], child_info[9], child_properties,
f'{current_parent_dir.tree_path}\\{child_info[3]}', target, child_info[10])
else:
child = orphan_dirs.get(child_id, None)
if child:
child.tree_path = f'{current_parent_dir.tree_path}\\{child.local_title}'
del orphan_dirs[child_id]
else:
child = Directory(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5],
child_info[6], child_info[7], child_info[8], child_info[9],
child_properties,
f'{current_parent_dir.tree_path}\\{child_info[3]}', child_info[10])
added_dirs[child_id] = child
current_parent_dir.add_item(child)
# TODO: check if I can add a link in the shared with me
for shared_with_me_item_info in get_shared_with_me_without_link(self.__profile_path):
shared_with_me_item_properties = get_item_properties(self.__profile_path, shared_with_me_item_info[1])
if shared_with_me_item_info[0] == 0:
content_cache_path = get_file_content_cache_path(
shared_with_me_item_properties.get('content-entry', None), content_caches_paths)
shared_with_me_file = File(shared_with_me_item_info[1], shared_with_me_item_info[2],
shared_with_me_item_info[3], shared_with_me_item_info[4],
shared_with_me_item_info[5], shared_with_me_item_info[6],
shared_with_me_item_info[7], shared_with_me_item_info[8],
shared_with_me_item_info[9], shared_with_me_item_properties,
f'Shared with me\\{shared_with_me_item_info[3]}', content_cache_path,
shared_with_me_item_info[10])
self.__synced_files_tree.add_shared_with_me_item(shared_with_me_file)
if shared_with_me_file:
self.__synced_files_tree.add_recoverable_item_from_cache(shared_with_me_file)
else:
shared_with_me_item = orphan_dirs.get(shared_with_me_item_info[1], None)
if shared_with_me_item:
del orphan_dirs[shared_with_me_item_info[1]]
else:
shared_with_me_item = Directory(shared_with_me_item_info[1], shared_with_me_item_info[2],
shared_with_me_item_info[3], shared_with_me_item_info[4],
shared_with_me_item_info[5], shared_with_me_item_info[6],
shared_with_me_item_info[7], shared_with_me_item_info[8],
shared_with_me_item_info[9], shared_with_me_item_properties,
f'{current_parent_dir.tree_path}\\{shared_with_me_item_info[3]}',
shared_with_me_item_info[10])
self.__synced_files_tree.add_shared_with_me_item(shared_with_me_item)
for orphan_id, orphan_dir in orphan_dirs.items():
self.__synced_files_tree.add_orphan_item(orphan_dir)
mirrored_items = get_mirrored_items(self.__profile_path)
for item in mirrored_items:
self.__synced_files_tree.add_mirrored_item(
MirrorItem(item[0], item[1], item[2], item[3], item[4], item[5], item[6], item[7], item[8], item[9],
item[10], item[11], item[12], item[13], item[14], item[15], item[16]
)
)
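        # Recover deleted items from their serialized protobuf blobs: field '55'
        # (and its '55-*' variants) carries the item properties, while field '4'
        # holds the MIME type used to tell folders and shortcuts apart from
        # regular files.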
for deleted_item in get_deleted_items(self.__profile_path):
parsed_buf = parse_protobuf(deleted_item[1])
properties = {}
for index, props in parsed_buf.items():
if index == '55' or index.startswith('55-'):
for prop in props:
if isinstance(prop, dict):
properties[prop['1']] = prop[[key for key in prop.keys() if key != '1'][0]]
elif isinstance(prop, list):
for p in prop:
properties[p['1']] = p[[key for key in p.keys() if key != '1'][0]]
if parsed_buf['4'] == 'application/vnd.google-apps.folder':
self.__synced_files_tree.add_recovered_deleted_item(
Directory(deleted_item[0], parsed_buf.get('1', ''), parsed_buf.get('3', ''),
parsed_buf.get('4', ''), parsed_buf.get('63', 0), parsed_buf.get('14', 0),
parsed_buf.get('11', 0), parsed_buf.get('13', 0), parsed_buf.get('7', 1),
properties, parsed_buf.get('3', ''), deleted_item[1])
)
elif parsed_buf['4'] == 'application/vnd.google-apps.shortcut':
target_item = None
target_info = parsed_buf.get('132', None)
if target_info:
target_item = self.__synced_files_tree.get_item_by_id(target_info['2'])
self.__synced_files_tree.add_recovered_deleted_item(
Link(deleted_item[0], parsed_buf.get('1', ''), parsed_buf.get('3', ''), parsed_buf.get('4', ''),
parsed_buf.get('63', 0), parsed_buf.get('14', 0), parsed_buf.get('11', 0),
parsed_buf.get('13', 0), parsed_buf.get('7', 1), properties, parsed_buf.get('3', ''),
target_item, deleted_item[1])
)
else:
content_cache_path = get_file_content_cache_path(
properties.get('content-entry', None), content_caches_paths)
recovered_file = File(deleted_item[0], parsed_buf.get('1', ''), parsed_buf.get('3', ''),
parsed_buf.get('4', ''), parsed_buf.get('63', 0), parsed_buf.get('14', 0),
parsed_buf.get('11', 0), parsed_buf.get('13', 0), parsed_buf.get('7', 1),
properties, parsed_buf.get('3', ''), content_cache_path, deleted_item[1])
self.__synced_files_tree.add_recovered_deleted_item(recovered_file)
if content_cache_path:
self.__synced_files_tree.add_recoverable_item_from_cache(recovered_file)
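# A minimal usage sketch, kept as comments so nothing runs at import time.
# The DriveFS path is hypothetical, and the call signature of
# get_mirroring_roots_for_account is assumed from its import above rather than
# shown in this excerpt:
#
#   drivefs_path = r'C:\Users\<user>\AppData\Local\Google\DriveFS'
#   for account_id, info in get_accounts(drivefs_path).items():
#       roots = get_mirroring_roots_for_account(drivefs_path, account_id)
#       account = Account(drivefs_path, account_id, info['email'],
#                         info['logged_in'], roots, info['properties'])
#       if account.is_logged_in():
#           account.get_synced_files_tree().print_synced_files_tree()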
class Setup:
def __init__(self, drivefs_path, accounts=None):
self.__drivefs_path = drivefs_path
self.__last_sync_date = datetime.datetime.fromtimestamp(get_last_sync(drivefs_path), datetime.timezone.utc)
self.__max_root_ids = get_max_root_ids(drivefs_path)
self.__last_pid = get_last_pid(drivefs_path)
self.__connected_devices = []
for connected_device in get_connected_devices(drivefs_path):
device = {
"media_id": connected_device[0],
"name": connected_device[1],
"last_mount_point": connected_device[2],
"ignore": connected_device[4],
}
if int(connected_device[3]) == -1:
device["capacity"] = connected_device[3]
else:
device["capacity"] = round(int(connected_device[3]) / 1e+9, 2)
self.__connected_devices.append(device)
if not accounts:
accounts = []
self.__accounts = [] | for account_id, account_info in get_accounts(drivefs_path).items(): | 21 | 2023-10-29 11:05:04+00:00 | 12k |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.