Dataset schema (column name, type, value range):

column               type     value range
repo_name            string   length 7 to 71
file_path            string   length 5 to 118
context              list     -
import_statement     string   length 45 to 12.5k
token_num            int64    641 to 99.4k
cropped_code         string   length 44 to 17k
all_code             string   length 43 to 754k
next_line            string   length 2 to 330
gold_snippet_index   int64    0 to 68
created_at           string   length 25
level                string   9 distinct values
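Assuming this schema comes from a dataset hosted on the Hugging Face Hub (the column summary matches the Hub viewer's auto-generated format), a record could be loaded and inspected roughly as sketched below. The repository ID and split name are placeholders, not the dataset's real identifiers.

```python
# Minimal sketch of loading one record with the columns listed above.
# NOTE: "org/repo-level-completion" and "train" are placeholder values,
# since the dataset's actual Hub ID and split names are not given here.
from datasets import load_dataset

ds = load_dataset("org/repo-level-completion", split="train")
print(ds.features)  # column names and dtypes: repo_name, file_path, context, ...

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])
print("retrieved snippets:", len(row["context"]))
print("gold snippet index:", row["gold_snippet_index"])
print("target next line:", row["next_line"])
```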
Example row 1:

repo_name: jiangjiechen/auction-arena
file_path: auction_workflow.py

context:
[ { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # history about the bidding war of one item\n items_queue: List[Item] = [] # updates when a item is taken.\n auction_logs = defaultdict(list) # history about the bidding war of all items\n openai_cost = 0\n prev_round_max_bid: int = -1\n min_bid: int = 0\n fail_to_sell = False\n min_markup_pct = 0.1\n\n class Config:\n arbitrary_types_allowed = True\n \n def init_items(self, items: List[Item]):\n for item in items:\n # reset discounted price\n item.reset_price()\n self.items = items\n self.items_queue = items.copy()\n\n def summarize_items_info(self):\n desc = ''\n for item in self.items:\n desc += f\"- {item.get_desc()}\\n\"\n return desc.strip()\n \n def present_item(self):\n cur_item = self.items_queue.pop(0)\n self.cur_item = cur_item\n return cur_item\n \n def shuffle_items(self):\n random.shuffle(self.items)\n self.items_queue = self.items.copy()\n \n def record_bid(self, bid_info: dict, bid_round: int):\n '''\n Save the bidding history for each round, log the highest bidder and highest bidding\n '''\n # bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx}\n self.bidding_history[bid_round].append(bid_info)\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n if self.highest_bid < hist['bid']:\n self.highest_bid = hist['bid']\n self.highest_bidder = hist['bidder']\n elif self.highest_bid == hist['bid']:\n # random if there's a tie\n self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']])\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append(\n {'bidder': bid_info['bidder'], \n 'bid': bid_info['bid'], \n 'bid_round': bid_round})\n\n def _biddings_to_string(self, bid_round: int):\n '''\n Return a string that summarizes the bidding history in a round\n '''\n # bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\\n'\n bid_hist_text = ''\n for js in self.bidding_history[bid_round]:\n if js['bid'] < 0:\n bid_hist_text += f\"- {js['bidder']} withdrew\\n\"\n else:\n bid_hist_text += f\"- {js['bidder']}: ${js['bid']}\\n\"\n return bid_hist_text.strip()\n \n def all_bidding_history_to_string(self):\n bid_hist_text = ''\n for bid_round in self.bidding_history:\n bid_hist_text += f\"Round {bid_round}:\\n{self._biddings_to_string(bid_round)}\\n\\n\"\n return bid_hist_text.strip()\n\n def ask_for_bid(self, bid_round: int):\n '''\n Ask for bid, return the message to be sent to bidders\n '''\n if self.highest_bidder is None:\n if bid_round > 0:\n msg = f\"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?\"\n else:\n remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue]\n msg = f\"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\\n\\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?\"\n else:\n bidding_history = self._biddings_to_string(bid_round - 1)\n msg = f\"Thank you! 
This is the {p.ordinal(bid_round)} round of bidding for this item:\\n{bidding_history}\\n\\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?\"\n return msg\n \n def ask_for_rebid(self, fail_msg: str, bid_price: int):\n return f\"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid.\"\n\n def get_hammer_msg(self):\n if self.highest_bidder is None:\n return f\"Since no one bid on {self.cur_item.name}, we'll move on to the next item.\"\n else:\n return f\"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! The true value for {self.cur_item} is ${self.cur_item.true_value}.\"# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}.\"\n\n def check_hammer(self, bid_round: int):\n # check if the item is sold\n self.fail_to_sell = False\n num_bid = self._num_bids_in_round(bid_round)\n\n # highest_bidder has already been updated in record_bid().\n # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item\n if self.highest_bidder is None:\n if num_bid == 0:\n # failed to sell, as there is no highest bidder\n self.fail_to_sell = True\n if self.enable_discount and bid_round < 3:\n # lower the starting price by 50%. discoutn only applies to the first 3 rounds\n self.cur_item.lower_price(0.5)\n is_sold = False\n else:\n is_sold = True\n else:\n # won't happen\n raise ValueError(f\"highest_bidder is None but num_bid is {num_bid}\")\n else:\n if self.prev_round_max_bid < 0 and num_bid == 1:\n # only one bidder in the first round \n is_sold = True\n else:\n self.prev_round_max_bid = self.highest_bid\n is_sold = self._num_bids_in_round(bid_round) == 0\n return is_sold\n \n def _num_bids_in_round(self, bid_round: int):\n # check if there is no bid in the current round\n cnt = 0\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n cnt += 1\n return cnt\n\n def hammer_fall(self):\n print(f'* Sold! 
{self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.')\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append({\n 'bidder': self.highest_bidder, \n 'bid': f\"{self.highest_bid} (${self.cur_item.true_value})\", # no need for the first $, as it will be added in the self.log()\n 'bid_round': 'Hammer price (true value)'})\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n\n def end_auction(self):\n return len(self.items_queue) == 0\n \n def gather_all_status(self, bidders: List[Bidder]):\n status = {}\n for bidder in bidders:\n status[bidder.name] = {\n 'profit': bidder.profit, \n 'items_won': bidder.items_won\n }\n return status\n\n def parse_bid(self, text: str):\n prompt = PARSE_BID_INSTRUCTION.format(response=text)\n with get_openai_callback() as cb:\n llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n result = llm([HumanMessage(content=prompt)]).content\n self.openai_cost += cb.total_cost\n \n bid_number = re.findall(r'\\$?\\d+', result.replace(',', ''))\n # find number in the result\n if '-1' in result:\n return -1\n elif len(bid_number) > 0:\n return int(bid_number[-1].replace('$', ''))\n else:\n print('* Rebid:', text)\n return None\n\n def log(self, bidder_personal_reports: list = [], show_model_name=True):\n ''' example\n Apparatus H, starting at $1000.\n\n 1st bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): $1200\n Bidder 2 (gpt-3.5-turbo-16k-0613): $1100\n Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n \n 2nd bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn\n \n Hammer price:\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n '''\n markdown_output = \"## Auction Log\\n\\n\"\n for i, (item, bids) in enumerate(self.auction_logs.items()):\n markdown_output += f\"### {i+1}. 
{item}\\n\\n\"\n cur_bid_round = -1\n for i, bid in enumerate(bids):\n if bid['bid_round'] != cur_bid_round:\n cur_bid_round = bid['bid_round']\n if isinstance(bid['bid_round'], int):\n markdown_output += f\"\\n#### {p.ordinal(bid['bid_round']+1)} bid:\\n\\n\"\n else:\n markdown_output += f\"\\n#### {bid['bid_round']}:\\n\\n\"\n bid_price = f\"${bid['bid']}\" if bid['bid'] != -1 else 'Withdrew'\n if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):\n if show_model_name:\n markdown_output += f\"* {bid['bidder']} ({bid['bidder'].model_name}): {bid_price}\\n\"\n else:\n markdown_output += f\"* {bid['bidder']}: {bid_price}\\n\"\n else:\n markdown_output += f\"* None bid\\n\"\n markdown_output += \"\\n\"\n \n if len(bidder_personal_reports) != 0:\n markdown_output += f\"\\n## Personal Report\"\n for report in bidder_personal_reports:\n markdown_output += f\"\\n\\n{report}\"\n return markdown_output.strip()\n \n def finish_auction(self):\n self.auction_logs = defaultdict(list)\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.items_queue = []\n self.items = []\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n self.min_bid = 0" }, { "identifier": "Bidder", "path": "src/bidder_base.py", "snippet": "class Bidder(BaseModel):\n name: str\n model_name: str \n budget: int \n desire: str\n plan_strategy: str\n temperature: float = 0.7\n overestimate_percent: int = 10\n correct_belief: bool\n enable_learning: bool = False\n \n llm: BaseLanguageModel = None\n openai_cost = 0\n llm_token_count = 0\n \n verbose: bool = False\n auction_hash: str = ''\n\n system_message: str = ''\n original_budget: int = 0\n\n # working memory\n profit: int = 0\n cur_item_id = 0\n items: list = []\n dialogue_history: list = [] # for gradio UI display\n llm_prompt_history: list = [] # for tracking llm calling\n items_won = []\n bid_history: list = [] # history of the bidding of a single item\n plan_instruct: str = '' # instruction for planning\n cur_plan: str = '' # current plan\n status_quo: dict = {} # belief of budget and profit, self and others\n withdraw: bool = False # state of withdraw\n learnings: str = '' # learnings from previous biddings. 
If given, then use it to guide the rest of the auction.\n max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item\n\n # belief tracking\n failed_bid_cnt: int = 0 # count of failed bids (overspending)\n total_bid_cnt: int = 0 # count of total bids\n self_belief_error_cnt: int = 0\n total_self_belief_cnt: int = 0\n other_belief_error_cnt: int = 0\n total_other_belief_cnt: int = 0\n \n engagement_count: int = 0\n budget_history = []\n profit_history = []\n budget_error_history = []\n profit_error_history = []\n win_bid_error_history = []\n engagement_history = defaultdict(int)\n all_bidders_status = {} # track others' profit\n changes_of_plan = []\n \n # not used\n input_box: str = None\n need_input = False\n semaphore = 0\n\n class Config:\n arbitrary_types_allowed = True\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n \n @classmethod\n def create(cls, **data):\n instance = cls(**data)\n instance._post_init()\n return instance\n\n def _post_init(self):\n self.original_budget = self.budget\n self.system_message = SYSTEM_MESSAGE.format(\n name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n )\n self._parse_llm()\n self.dialogue_history += [\n SystemMessage(content=self.system_message), \n AIMessage(content='')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n\n def _parse_llm(self):\n if 'gpt-' in self.model_name:\n self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)\n elif 'claude' in self.model_name:\n self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)\n elif 'bison' in self.model_name:\n self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)\n elif 'rule' in self.model_name or 'human' in self.model_name:\n self.llm = None\n else:\n raise NotImplementedError(self.model_name)\n \n # def _rotate_openai_org(self):\n # # use two organizations to avoid rate limit\n # if os.environ.get('OPENAI_ORGANIZATION_1') and os.environ.get('OPENAI_ORGANIZATION_2'):\n # return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])\n # else:\n # return None\n \n def _run_llm_standalone(self, messages: list):\n \n with get_openai_callback() as cb:\n for i in range(6):\n try:\n input_token_num = self.llm.get_num_tokens_from_messages(messages)\n if 'claude' in self.model_name: # anthropic's claude\n result = self.llm(messages, max_tokens_to_sample=2048)\n elif 'bison' in self.model_name: # google's palm-2\n max_tokens = min(max(3900 - input_token_num, 192), 2048)\n if isinstance(self.llm, ChatVertexAI):\n result = self.llm(messages, max_output_tokens=max_tokens)\n else:\n result = self.llm(messages)\n elif 'gpt' in self.model_name: # openai\n if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:\n max_tokens = max(3900 - input_token_num, 192)\n else:\n # gpt-4\n # self.llm.openai_organization = self._rotate_openai_org()\n max_tokens = max(8000 - input_token_num, 192)\n result = self.llm(messages, max_tokens=max_tokens)\n elif 'llama' in self.model_name.lower():\n raise NotImplementedError\n else:\n raise NotImplementedError\n break\n except:\n print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')\n time.sleep(2**(i+1))\n self.openai_cost += cb.total_cost\n 
self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)\n return result.content\n\n def _get_estimated_value(self, item):\n value = item.true_value * (1 + self.overestimate_percent / 100)\n return int(value)\n \n def _get_cur_item(self, key=None):\n if self.cur_item_id < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id].__dict__[key]\n else:\n return self.items[self.cur_item_id]\n else:\n return 'no item left'\n \n def _get_next_item(self, key=None):\n if self.cur_item_id + 1 < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id + 1].__dict__[key]\n else:\n return self.items[self.cur_item_id + 1]\n else:\n return 'no item left'\n \n def _get_remaining_items(self, as_str=False):\n remain_items = self.items[self.cur_item_id + 1:]\n if as_str:\n return ', '.join([item.name for item in remain_items])\n else:\n return remain_items\n \n def _get_items_value_str(self, items: List[Item]):\n if not isinstance(items, list):\n items = [items]\n items_info = ''\n for i, item in enumerate(items):\n estimated_value = self._get_estimated_value(item)\n _info = f\"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\\n\"\n items_info += _info\n return items_info.strip()\n \n # ********** Main Instructions and Functions ********** #\n \n def learn_from_prev_auction(self, past_learnings, past_auction_log):\n if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:\n return ''\n \n instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(\n past_auction_log=past_auction_log,\n past_learnings=past_learnings)\n\n result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])\n self.dialogue_history += [\n HumanMessage(content=instruct_learn),\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],\n 'result': result,\n 'tag': 'learn_0'\n })\n \n self.learnings = '\\n'.join(extract_numbered_list(result))\n if self.learnings != '':\n self.system_message += f\"\\n\\nHere are your key learning points and practical tips from a previous auction. 
You can use them to guide this auction:\\n```\\n{self.learnings}\\n```\"\n \n if self.verbose:\n print(f\"Learn from previous auction: {self.name} ({self.model_name}).\")\n return result\n\n def _choose_items(self, budget, items: List[Item]):\n '''\n Choose items within budget for rule bidders.\n Cheap ones first if maximize_items, expensive ones first if maximize_profit.\n '''\n sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x), \n reverse=self.desire == 'maximize_profit')\n \n chosen_items = []\n i = 0\n while budget >= 0 and i < len(sorted_items):\n item = sorted_items[i]\n if item.price <= budget:\n chosen_items.append(item)\n budget -= item.price\n i += 1\n \n return chosen_items\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items), \n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n '''\n Plan for bidding with auctioneer's instruction and items information for customize estimated value.\n plan = plan(system_message, instruct_plan)\n '''\n if 'rule' in self.model_name: \n # self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])\n # self.dialogue_history += [\n # HumanMessage(content=plan_instruct),\n # AIMessage(content=self.cur_plan),\n # ]\n # return self.cur_plan\n return ''\n\n self.status_quo = {\n 'remaining_budget': self.budget,\n 'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},\n 'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},\n }\n\n if self.plan_strategy == 'none':\n self.plan_instruct = ''\n self.cur_plan = ''\n return None\n\n system_msg = SystemMessage(content=self.system_message)\n plan_msg = HumanMessage(content=plan_instruct)\n messages = [system_msg, plan_msg]\n result = self._run_llm_standalone(messages)\n \n if self.verbose:\n print(get_colored_text(plan_msg.content, 'red'))\n print(get_colored_text(result, 'green'))\n \n self.dialogue_history += [\n plan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': 'plan_0'\n })\n self.cur_plan = result\n self.plan_instruct = plan_instruct\n \n self.changes_of_plan.append([\n f\"{self.cur_item_id} (Initial)\", \n False, \n json.dumps(extract_jsons_from_text(result)[-1]),\n ])\n \n if self.verbose:\n print(f\"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n return result\n \n def get_rebid_instruct(self, auctioneer_msg: str):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg),\n AIMessage(content='')\n ]\n return auctioneer_msg\n\n def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):\n auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')\n \n bid_instruct = INSTRUCT_BID_TEMPLATE.format(\n auctioneer_msg=auctioneer_msg, \n bidder_name=self.name,\n cur_item=self._get_cur_item(),\n estimated_value=self._get_estimated_value(self._get_cur_item()),\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n if bid_round == 0:\n if self.plan_strategy in ['static', 'none']:\n # if static planner, then no replanning is needed. status quo is updated in replanning. 
thus need to add status quo in bid instruct.\n bid_instruct = f\"\"\"The status quo of this auction so far is:\\n\"{json.dumps(self.status_quo, indent=4)}\"\\n\\n{bid_instruct}\\n---\\n\"\"\"\n else:\n bid_instruct = f'Now, the auctioneer says: \"{auctioneer_msg}\"'\n \n self.dialogue_history += [\n HumanMessage(content=bid_instruct),\n AIMessage(content='')\n ]\n return bid_instruct\n \n def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):\n '''\n :param cur_bid: current highest bid\n :param min_markup_pct: minimum percentage for bid increase\n :param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n '''\n # dialogue history already got bid_instruction.\n cur_item = self._get_cur_item()\n \n if cur_bid <= 0:\n next_bid = cur_item.price\n else:\n next_bid = cur_bid + min_markup_pct * cur_item.price\n \n if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:\n msg = int(next_bid)\n self.rule_bid_cnt += 1\n else:\n msg = -1\n \n content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '\n content += \"I'm out!\" if msg < 0 else f\"I bid ${msg}! (Rule generated)\"\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=content)\n ]\n \n return msg\n \n def bid(self, bid_instruct):\n '''\n Bid for an item with auctioneer's instruction and bidding history.\n bid_history = bid(system_message, instruct_plan, plan, bid_history)\n '''\n if self.model_name == 'rule':\n return ''\n \n bid_msg = HumanMessage(content=bid_instruct)\n \n if self.plan_strategy == 'none':\n messages = [SystemMessage(content=self.system_message)]\n else:\n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n \n self.bid_history += [bid_msg]\n messages += self.bid_history\n \n result = self._run_llm_standalone(messages)\n \n self.bid_history += [AIMessage(content=result)]\n\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=result)\n ]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'bid_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(bid_instruct, 'yellow'))\n print(get_colored_text(result, 'green'))\n \n print(f\"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n self.total_bid_cnt += 1\n \n return result\n\n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(\n cur_item=self._get_cur_item(), \n bidding_history=bidding_history, \n hammer_msg=hammer_msg.strip(), \n win_lose_msg=win_lose_msg.strip(), \n bidder_name=self.name,\n prev_status=self._status_json_to_text(self.status_quo),\n )\n return instruct\n\n def summarize(self, instruct_summarize: str):\n '''\n Update belief/status quo\n status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)\n '''\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n \n if self.model_name == 'rule': \n self.rule_bid_cnt = 0 # reset bid count for rule bidder\n return ''\n \n messages = [SystemMessage(content=self.system_message)]\n # messages += self.bid_history\n summ_msg = HumanMessage(content=instruct_summarize)\n messages.append(summ_msg)\n\n status_quo_text = self._run_llm_standalone(messages)\n \n self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]\n self.bid_history += [summ_msg, 
AIMessage(content=status_quo_text)]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': status_quo_text,\n 'tag': f'summarize_{self.cur_item_id}'\n })\n\n cnt = 0\n while cnt <= 3:\n sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])\n if sanity_msg == '':\n # pass sanity check then track beliefs\n consistency_msg = self._belief_tracking(status_quo_text)\n else:\n sanity_msg = f'- {sanity_msg}'\n consistency_msg = ''\n \n if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):\n err_msg = f\"As {self.name}, here are some error(s) of your summary of the status JSON:\\n{sanity_msg.strip()}\\n{consistency_msg.strip()}\\n\\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.\".strip()\n \n # print(f\"{self.name}: revising status quo for the {cnt} time:\")\n # print(get_colored_text(err_msg, 'green'))\n # print(get_colored_text(status_quo_text, 'red'))\n \n messages += [AIMessage(content=status_quo_text), \n HumanMessage(content=err_msg)]\n status_quo_text = self._run_llm_standalone(messages)\n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=status_quo_text),\n ]\n cnt += 1\n else:\n break\n \n self.status_quo = extract_jsons_from_text(status_quo_text)[-1]\n\n if self.verbose:\n print(get_colored_text(instruct_summarize, 'blue'))\n print(get_colored_text(status_quo_text, 'green'))\n \n print(f\"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n \n return status_quo_text\n \n def get_replan_instruct(self):\n instruct = INSTRUCT_REPLAN_TEMPLATE.format(\n status_quo=self._status_json_to_text(self.status_quo),\n remaining_items_info=self._get_items_value_str(self._get_remaining_items()),\n bidder_name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return instruct\n\n def replan(self, instruct_replan: str):\n '''\n plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)\n '''\n if self.model_name == 'rule': \n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n if self.plan_strategy in ['none', 'static']:\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n self.withdraw = False\n return 'Skip replanning for bidders with static or no plan.'\n \n replan_msg = HumanMessage(content=instruct_replan)\n \n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n messages.append(replan_msg)\n\n result = self._run_llm_standalone(messages)\n \n new_plan_dict = extract_jsons_from_text(result)[-1]\n cnt = 0\n while len(new_plan_dict) == 0 and cnt < 2:\n err_msg = 'Your response does not contain a JSON-format priority list for items. 
Please revise your plan.'\n messages += [\n AIMessage(content=result),\n HumanMessage(content=err_msg),\n ]\n result = self._run_llm_standalone(messages)\n new_plan_dict = extract_jsons_from_text(result)[-1]\n \n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=result),\n ]\n cnt += 1\n \n old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]\n self.changes_of_plan.append([\n f\"{self.cur_item_id + 1} ({self._get_cur_item('name')})\", \n self._change_of_plan(old_plan_dict, new_plan_dict),\n json.dumps(new_plan_dict)\n ])\n \n self.plan_instruct = instruct_replan\n self.cur_plan = result\n self.withdraw = False\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n\n self.dialogue_history += [\n replan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'plan_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(instruct_replan, 'blue'))\n print(get_colored_text(result, 'green'))\n\n print(f\"Replan: {self.name} ({self.model_name}).\")\n return result\n \n def _change_of_plan(self, old_plan: dict, new_plan: dict):\n for k in new_plan:\n if new_plan[k] != old_plan.get(k, None):\n return True\n return False\n \n # *********** Belief Tracking and Sanity Check *********** #\n \n def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):\n # can't bid more than budget or less than previous highest bid\n if bid_price < 0:\n msg = None\n else:\n min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))\n if bid_price > self.budget:\n msg = f\"you don't have insufficient budget (${self.budget} left)\"\n elif bid_price < self._get_cur_item('price'):\n msg = f\"your bid is lower than the starting bid (${self._get_cur_item('price')})\"\n elif bid_price < prev_round_max_bid + min_bid_increase:\n msg = f\"you must advance previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%).\"\n else:\n msg = None\n return msg\n\n def rebid_for_failure(self, fail_instruct: str):\n result = self.bid(fail_instruct)\n self.failed_bid_cnt += 1\n return result\n \n def _sanity_check_status_json(self, data: dict):\n if data == {}:\n return \"Error: No parsible JSON in your response. 
Possibly due to missing a closing curly bracket '}', or unpasible values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400).\"\n\n # Check if all expected top-level keys are present\n expected_keys = [\"remaining_budget\", \"total_profits\", \"winning_bids\"]\n for key in expected_keys:\n if key not in data:\n return f\"Error: Missing '{key}' field in the status JSON.\"\n\n # Check if \"remaining_budget\" is a number\n if not isinstance(data[\"remaining_budget\"], (int, float)):\n return \"Error: 'remaining_budget' should be a number, and only about your remaining budget.\"\n\n # Check if \"total_profits\" is a dictionary with numbers as values\n if not isinstance(data[\"total_profits\"], dict):\n return \"Error: 'total_profits' should be a dictionary of every bidder.\"\n for bidder, profit in data[\"total_profits\"].items():\n if not isinstance(profit, (int, float)):\n return f\"Error: Profit for {bidder} should be a number.\"\n\n # Check if \"winning_bids\" is a dictionary and that each bidder's entry is a dictionary with numbers\n if not isinstance(data[\"winning_bids\"], dict):\n return \"Error: 'winning_bids' should be a dictionary.\"\n for bidder, bids in data[\"winning_bids\"].items():\n if not isinstance(bids, dict):\n return f\"Error: Bids for {bidder} should be a dictionary.\"\n for item, amount in bids.items():\n if not isinstance(amount, (int, float)):\n return f\"Error: Amount for {item} under {bidder} should be a number.\"\n\n # If everything is fine\n return \"\"\n \n def _status_json_to_text(self, data: dict):\n if 'rule' in self.model_name: return ''\n \n # Extract and format remaining budget\n structured_text = f\"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\\n\\n\"\n \n # Extract and format total profits for each bidder\n structured_text += \"* Total Profits:\\n\"\n if data.get('total_profits'):\n for bidder, profit in data['total_profits'].items():\n structured_text += f\" * {bidder}: ${profit}\\n\"\n \n # Extract and list the winning bids for each item by each bidder\n structured_text += \"\\n* Winning Bids:\\n\"\n if data.get('winning_bids'):\n for bidder, bids in data['winning_bids'].items():\n structured_text += f\" * {bidder}:\\n\"\n if bids:\n for item, amount in bids.items():\n structured_text += f\" * {item}: ${amount}\\n\"\n else:\n structured_text += f\" * No winning bids\\n\"\n \n return structured_text.strip()\n\n def _belief_tracking(self, status_text: str):\n '''\n Parse status quo and check if the belief is correct.\n '''\n belief_json = extract_jsons_from_text(status_text)[-1]\n # {\"remaining_budget\": 8000, \"total_profits\": {\"Bidder 1\": 1300, \"Bidder 2\": 1800, \"Bidder 3\": 0}, \"winning_bids\": {\"Bidder 1\": {\"Item 2\": 1200, \"Item 3\": 1000}, \"Bidder 2\": {\"Item 1\": 2000}, \"Bidder 3\": {}}}\n budget_belief = belief_json['remaining_budget']\n profits_belief = belief_json['total_profits']\n winning_bids = belief_json['winning_bids']\n\n msg = ''\n # track belief of budget\n self.total_self_belief_cnt += 1\n if budget_belief != self.budget:\n msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\\n'\n self.self_belief_error_cnt += 1\n self.budget_error_history.append([\n self._get_cur_item('name'),\n budget_belief,\n self.budget,\n ])\n \n # track belief of profits\n for bidder_name, profit in profits_belief.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n \n if self.name in 
bidder_name: \n bidder_name = self.name\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n real_profit = self.all_bidders_status[bidder_name]['profit']\n \n if profit != real_profit:\n if self.name == bidder_name:\n self.self_belief_error_cnt += 1\n else:\n self.other_belief_error_cnt += 1\n\n msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\\n'\n\n # add to history\n self.profit_error_history.append([\n f\"{bidder_name} ({self._get_cur_item('name')})\",\n profit,\n real_profit\n ])\n\n # track belief of winning bids\n for bidder_name, items_won_dict in winning_bids.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n\n real_items_won = self.all_bidders_status[bidder_name]['items_won']\n # items_won = [(item, bid_price), ...)]\n \n items_won_list = list(items_won_dict.keys())\n real_items_won_list = [str(x) for x, _ in real_items_won]\n \n if self.name in bidder_name:\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n if not item_list_equal(items_won_list, real_items_won_list):\n if bidder_name == self.name:\n self.self_belief_error_cnt += 1\n _bidder_name = f'you'\n else:\n self.other_belief_error_cnt += 1\n _bidder_name = bidder_name\n \n msg += f\"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\\n\"\n\n self.win_bid_error_history.append([\n f\"{_bidder_name} ({self._get_cur_item('name')})\",\n ', '.join(items_won_list),\n ', '.join(real_items_won_list)\n ])\n \n return msg\n \n def win_bid(self, item: Item, bid: int):\n self.budget -= bid\n self.profit += item.true_value - bid\n self.items_won += [[item, bid]]\n msg = f\"Congratuations! You won {item} at ${bid}.\"# Now you have ${self.budget} left. Your total profit so far is ${self.profit}.\"\n return msg\n \n def lose_bid(self, item: Item):\n return f\"You lost {item}.\"# Now, you have ${self.budget} left. 
Your total profit so far is ${self.profit}.\"\n \n # set the profit information of other bidders\n def set_all_bidders_status(self, all_bidders_status: dict):\n self.all_bidders_status = all_bidders_status.copy()\n\n def set_withdraw(self, bid: int):\n if bid < 0: # withdraw\n self.withdraw = True\n elif bid == 0: # enable discount and bid again\n self.withdraw = False\n else: # normal bid\n self.withdraw = False\n self.engagement_count += 1\n self.engagement_history[self._get_cur_item('name')] += 1\n \n # ****************** Logging ****************** #\n \n # def _parse_hedging(self, plan: str): # deprecated\n # prompt = PARSE_HEDGE_INSTRUCTION.format(\n # item_name=self._get_cur_item(), \n # plan=plan)\n \n # with get_openai_callback() as cb:\n # llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n # result = llm([HumanMessage(content=prompt)]).content\n # self.openai_cost += cb.total_cost\n # # parse a number, which could be a digit\n # hedge_percent = re.findall(r'\\d+\\.?\\d*%', result)\n # if len(hedge_percent) > 0:\n # hedge_percent = hedge_percent[0].replace('%', '')\n # else:\n # hedge_percent = 0\n # return float(hedge_percent)\n \n def profit_report(self):\n '''\n Personal profit report at the end of an auction.\n '''\n msg = f\"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\\n\"\n profit = 0\n for item, bid in self.items_won:\n profit += item.true_value - bid\n msg += f\" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\\n\"\n return msg.strip()\n \n def to_monitors(self, as_json=False):\n # budget, profit, items_won, tokens\n if len(self.items_won) == 0 and not as_json: \n items_won = [['', 0, 0]]\n else:\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n \n profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]\n win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]\n budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]\n changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]\n \n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'model_name': self.model_name,\n 'desire': self.desire,\n 'plan_strategy': self.plan_strategy,\n 'overestimate_percent': self.overestimate_percent,\n 'temperature': self.temperature,\n 'correct_belief': self.correct_belief,\n 'enable_learning': self.enable_learning,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'tokens_used': self.llm_token_count,\n 'openai_cost': round(self.openai_cost, 2),\n 'failed_bid_cnt': self.failed_bid_cnt,\n 'self_belief_error_cnt': self.self_belief_error_cnt,\n 'other_belief_error_cnt': self.other_belief_error_cnt,\n 'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),\n 'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),\n 'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),\n 'engagement_count': self.engagement_count,\n 'engagement_history': self.engagement_history,\n 'changes_of_plan': changes_of_plan,\n 'budget_error_history': budget_error_history,\n 'profit_error_history': 
profit_error_history,\n 'win_bid_error_history': win_bid_error_history,\n 'history': self.llm_prompt_history\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n self.llm_token_count, \n round(self.openai_cost, 2), \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2), \n round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2), \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n changes_of_plan,\n budget_error_history,\n profit_error_history, \n win_bid_error_history\n ]\n\n def dialogue_to_chatbot(self):\n # chatbot: [[Human, AI], [], ...]\n # only dialogue will be sent to LLMs. chatbot is just for display.\n assert len(self.dialogue_history) % 2 == 0\n chatbot = []\n for i in range(0, len(self.dialogue_history), 2):\n # if exceeds the length of dialogue, append the last message\n human_msg = self.dialogue_history[i].content\n ai_msg = self.dialogue_history[i+1].content\n if ai_msg == '': ai_msg = None\n if human_msg == '': human_msg = None\n chatbot.append([human_msg, ai_msg])\n return chatbot" }, { "identifier": "bidders_to_chatbots", "path": "src/bidder_base.py", "snippet": "def bidders_to_chatbots(bidder_list: List[Bidder], profit_report=False):\n if profit_report: # usually at the end of an auction\n return [x.dialogue_to_chatbot() + [[x.profit_report(), None]] for x in bidder_list]\n else:\n return [x.dialogue_to_chatbot() for x in bidder_list]" }, { "identifier": "bidding_multithread", "path": "src/bidder_base.py", "snippet": "def bidding_multithread(bidder_list: List[Bidder], \n instruction_list, \n func_type,\n thread_num=5,\n retry=1):\n '''\n auctioneer_msg: either a uniform message (str) or customed (list)\n '''\n assert func_type in ['plan', 'bid', 'summarize', 'replan']\n \n result_queue = queue.Queue()\n threads = []\n semaphore = threading.Semaphore(thread_num)\n\n def run_once(i: int, bidder: Bidder, auctioneer_msg: str):\n try:\n semaphore.acquire()\n if func_type == 'bid':\n \n result = bidder.bid(auctioneer_msg)\n elif func_type == 'summarize':\n result = bidder.summarize(auctioneer_msg)\n elif func_type == 'plan':\n result = bidder.init_plan(auctioneer_msg)\n elif func_type == 'replan':\n result = bidder.replan(auctioneer_msg)\n else:\n raise NotImplementedError(f'func_type {func_type} not implemented')\n result_queue.put((True, i, result))\n # except Exception as e:\n # result_queue.put((False, i, str(trace_back(e))))\n finally:\n semaphore.release()\n\n if isinstance(instruction_list, str):\n instruction_list = [instruction_list] * len(bidder_list)\n \n for i, (bidder, msg) in enumerate(zip(bidder_list, instruction_list)):\n thread = threading.Thread(target=run_once, args=(i, bidder, msg))\n thread.start()\n threads.append(thread)\n \n for thread in threads:\n thread.join(timeout=600)\n \n results = [result_queue.get() for _ in range(len(bidder_list))]\n \n errors = []\n for success, id, result in results:\n if not success:\n errors.append((id, result))\n \n if errors:\n raise Exception(f\"Error(s) in {func_type}:\\n\" + '\\n'.join([f'{i}: {e}' for i, e in errors]))\n \n valid_results = [x[1:] for x in results if x[0]]\n valid_results.sort()\n \n return [x for _, x in valid_results]" }, { "identifier": "trace_back", "path": "utils.py", "snippet": "def trace_back(error_msg):\n exc = traceback.format_exc()\n msg = f'[Error]: {error_msg}.\\n[Traceback]: {exc}'\n return msg" } ]
import_statement:
import os
import time
import gradio as gr
import ujson as json
import traceback
import argparse
from typing import List
from tqdm import tqdm
from src.auctioneer_base import Auctioneer
from src.bidder_base import Bidder, bidders_to_chatbots, bidding_multithread
from utils import trace_back
from src.item_base import create_items
from src.bidder_base import create_bidders
from transformers import GPT2TokenizerFast

token_num: 12,711

cropped_code:
LOG_DIR = 'logs'
enable_gr = gr.update(interactive=True)
disable_gr = gr.update(interactive=False)

all_code:
LOG_DIR = 'logs'
enable_gr = gr.update(interactive=True)
disable_gr = gr.update(interactive=False)

next_line: def monitor_all(bidder_list: List[Bidder]):

gold_snippet_index: 1

created_at: 2023-10-08 09:30:57+00:00

level: 16k
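Taken together, the fields above (cropped_code, next_line, and a gold_snippet_index pointing into context) suggest a repository-level next-line completion task with retrieved cross-file snippets; that reading is an inference from the column names, not something this dump states. Under that assumption, one row might be turned into a prompt and scored roughly as follows; the prompt layout and the exact-match metric are illustrative choices only.

```python
# Hedged sketch: building a prompt from one row and scoring a model's output.
# The prompt layout and exact-match criterion are assumptions for illustration.
def build_prompt(row: dict) -> str:
    gold = row["context"][row["gold_snippet_index"]]  # retrieved cross-file snippet
    return (
        f"# Snippet from {gold['path']}:\n{gold['snippet']}\n\n"
        f"# Current file: {row['file_path']}\n{row['cropped_code']}\n"
    )

def next_line_exact_match(prediction: str, row: dict) -> bool:
    # Compare only the first non-empty generated line against the gold next_line.
    pred_line = next((line for line in prediction.splitlines() if line.strip()), "")
    return pred_line.strip() == row["next_line"].strip()
```

Exact match on the first non-empty line is only one plausible metric; edit similarity or identifier-level comparison would be equally reasonable for this kind of data.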
Example row 2:

repo_name: SH1ROd/Bert-VITS2-Integration-train-txt-infer
file_path: train_ms.py

context:
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(hparams, \"use_mel_posterior_encoder\", False)\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:\n audiopath = f'{_id}'\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n print(\"skipped: \", skipped, \", total: \", len(self.audiopaths_sid_text))\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", 
\".spec.pt\")), \n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(audio_norm, self.filter_length,\n self.n_mel_channels, self.sampling_rate, self.hop_length,\n self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)\n else:\n spec = spectrogram_torch(audio_norm, self.filter_length,\n self.sampling_rate, self.hop_length, self.win_length,\n center=False)\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n # print(text, word2ph,phone, tone, language_str)\n pold = phone\n w2pho = [i for i in word2ph]\n word2ph = [i for i in word2ph]\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n pold2 = phone\n\n if self.add_blank:\n p1 = len(phone)\n phone = commons.intersperse(phone, 0)\n p2 = len(phone)\n t1 = len(tone)\n tone = commons.intersperse(tone, 0)\n t2 = len(tone)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str)\n torch.save(bert, bert_path)\n #print(bert.shape[-1], bert_path, text, pold)\n assert bert.shape[-1] == len(phone)\n\n assert bert.shape[-1] == len(phone), (\n bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate():\n \"\"\" Zero-pads model inputs and targets\n \"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]),\n dim=0, descending=True)\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = 
batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, :text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, :spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, :wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, :tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, :language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, :bert.size(1)] = bert\n\n return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if (len_bucket == 0):\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]\n\n # subsample\n ids_bucket = ids_bucket[self.rank::self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * 
self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer = 4,\n n_layers_trans_flow = 3,\n flow_share_parameter = False,\n use_transformer_flow = True,\n **kwargs):\n\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\"use_spk_conditioned_encoder\", True)\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels)\n self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,\n upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)\n self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,\n gin_channels=gin_channels)\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)\n else:\n self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)\n self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)\n self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)\n \n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n 
def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]\n neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),\n s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n \n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)\n o = self.dec(z_slice, g=g)\n return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)\n \n def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):\n #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,\n 2) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, 
use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): #vits2\n def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(filter_channels, 1), \n nn.Sigmoid() \n )\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n 
g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2 " }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + '_' + str(y.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n wnsize_dtype_device = str(win_size) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + '_' + str(spec.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
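The context entries above end with the four loss functions from losses.py that this record's training loop relies on. A minimal sketch of calling them on dummy tensors follows — an illustration only, assuming the script is run from the repository root so `losses.py` is importable; the tensor shapes are made up for the demo and are not what the real discriminators emit.

```python
# Minimal sketch: exercising the loss functions quoted in the context above.
import torch
from losses import discriminator_loss, generator_loss, feature_loss, kl_loss

# Fake outputs of a 2-scale discriminator for a batch of 4 (shapes are illustrative).
d_real = [torch.rand(4, 100), torch.rand(4, 80)]
d_fake = [torch.rand(4, 100), torch.rand(4, 80)]

loss_d, r_losses, g_losses = discriminator_loss(d_real, d_fake)  # LSGAN: (1-D(y))^2 + D(y_hat)^2
loss_g, gen_losses = generator_loss(d_fake)                      # LSGAN: (1-D(y_hat))^2

# Feature-matching loss over per-layer activations of each sub-discriminator.
fmap_r = [[torch.rand(4, 16, 50)], [torch.rand(4, 16, 40)]]
fmap_g = [[torch.rand(4, 16, 50)], [torch.rand(4, 16, 40)]]
loss_fm = feature_loss(fmap_r, fmap_g)

# KL term between the posterior and the flow-mapped prior, masked by z_mask.
b, h, t = 4, 192, 50
z_p, m_p, logs_p, logs_q = (torch.randn(b, h, t) for _ in range(4))
z_mask = torch.ones(b, 1, t)
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask)

print(loss_d.item(), loss_g.item(), loss_fm.item(), loss_kl.item())
```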
import os import json import argparse import itertools import math import torch import shutil import torch.multiprocessing as mp import torch.distributed as dist import logging import commons import utils from torch import nn, optim from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
10,914
optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator 
y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
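The loop in the cropped_code above linearly anneals the MAS noise scale each step (`current_mas_noise_scale = initial - delta * global_step`, clamped at zero). With the defaults set in `run()` for the VITS2 noise-scaled MAS path (initial 0.01, delta 2e-6), the noise vanishes after 5,000 steps. A tiny illustration of that schedule:

```python
# Illustration of the noise-scaled MAS schedule used in the training loop above
# (the constants are the defaults printed when use_noise_scaled_mas is enabled).
mas_noise_scale_initial = 0.01
noise_scale_delta = 2e-6

def mas_noise_scale(global_step: int) -> float:
    return max(mas_noise_scale_initial - noise_scale_delta * global_step, 0.0)

print(mas_noise_scale(0))      # 0.01
print(mas_noise_scale(2500))   # 0.005
print(mas_noise_scale(5000))   # 0.0 -> plain (noise-free) MAS from here on
```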
logging.getLogger('numba').setLevel(logging.WARNING) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision('medium') global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '65280' hps = utils.get_hparams() role='' for t in hps.data.spk2id.items(): role=t[0] if not hps.cont: folder_path = f"./logs/{role}" if not os.path.exists(folder_path): os.makedirs(folder_path) print(f"文件夹 '{role}' 已创建在 './logs/' 目录下。") else: print(f"文件夹 '{role}' 已经存在于 './logs/' 目录下。") shutil.copy('./pretrained_models/D_0.pth',f'./logs/{role}/D_0.pth') shutil.copy('./pretrained_models/G_0.pth',f'./logs/{role}/G_0.pth') shutil.copy('./pretrained_models/DUR_0.pth',f'./logs/{role}/DUR_0.pth') mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps, role)) def run(rank, n_gpus, hps, role): global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn) if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: print("Using duration discriminator for VITS2") use_duration_discriminator = True net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: if hps.data.n_speakers == 0: raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial = mas_noise_scale_initial, noise_scale_delta = noise_scale_delta, **hps.model).cuda(rank) freeze_enc = getattr(hps.model, "freeze_enc", False) if freeze_enc: print("freeze 
encoder !!!") for param in net_g.enc_p.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) if net_dur_disc is not None: net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) pretrain_dir = None if pretrain_dir is None: try: if net_dur_disc is not None: _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=not hps.cont) _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = 
net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
loss_gen, losses_gen = generator_loss(y_d_hat_g)
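The target next line above applies `generator_loss` to the generator-path discriminator outputs. For orientation, here is a hedged sketch of how a VITS/VITS2-style generator update typically proceeds from that point, assuming the loop variables shown earlier (`y_d_hat_g`, `y_dur_hat_g`, `loss_fm`, `loss_mel`, `loss_dur`, `loss_kl`, `optim_g`, `scaler`, `net_g`, `net_dur_disc`) are in scope; this is an illustration, not the repository's verbatim continuation.

```python
# Hedged continuation sketch: combine the adversarial, feature-matching, mel,
# duration and KL terms into the total generator loss, then take an AMP step.
with autocast(enabled=False):
    loss_gen, losses_gen = generator_loss(y_d_hat_g)
    loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
    if net_dur_disc is not None:
        loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g)
        loss_gen_all = loss_gen_all + loss_dur_gen

optim_g.zero_grad()
scaler.scale(loss_gen_all).backward()
scaler.unscale_(optim_g)
grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
scaler.step(optim_g)
scaler.update()
```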
6
2023-10-10 02:23:23+00:00
16k
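The training script in this record reads everything from `utils.get_hparams()`; the attributes it accesses (`hps.train.*`, `hps.data.*`, `hps.model.*`, `hps.cont`) imply a configuration shaped roughly like the fragment below. The key names come straight from the code above; the values are illustrative placeholders, not the repository's actual config.

```python
# Hypothetical hparams fragment inferred from the attribute accesses in run()
# and train_and_evaluate(); all values are placeholders.
hps_sketch = {
    "cont": False,  # resume from ./logs/<role> checkpoints if True
    "train": {
        "fp16_run": True, "learning_rate": 2e-4, "betas": [0.8, 0.99], "eps": 1e-9,
        "lr_decay": 0.999875, "epochs": 100, "batch_size": 8,
        "segment_size": 16384, "c_mel": 45, "c_kl": 1.0, "seed": 1234,
    },
    "data": {
        "training_files": "filelists/train.list", "validation_files": "filelists/val.list",
        "sampling_rate": 44100, "filter_length": 2048, "hop_length": 512, "win_length": 2048,
        "n_mel_channels": 128, "mel_fmin": 0.0, "mel_fmax": None,
        "n_speakers": 1, "spk2id": {"some_role": 0},
    },
    "model": {
        "hidden_channels": 192, "gin_channels": 256, "use_spectral_norm": False,
        "use_noise_scaled_mas": True, "use_duration_discriminator": True,
        "use_spk_conditioned_encoder": True, "freeze_enc": False,
    },
}
```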
sakemin/cog-musicgen-chord
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the chroma extraction.\n radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).\n nfft (int, optional): Number of FFT.\n winlen (int, optional): Window length.\n winhop (int, optional): Window hop size.\n argmax (bool, optional): Whether to use argmax. Defaults to False.\n norm (float, optional): Norm for chroma normalization. Defaults to inf.\n \"\"\"\n def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,\n winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,\n norm: float = torch.inf):\n super().__init__()\n self.winlen = winlen or 2 ** radix2_exp\n self.nfft = nfft or self.winlen\n self.winhop = winhop or (self.winlen // 4)\n self.sample_rate = sample_rate\n self.n_chroma = n_chroma\n self.norm = norm\n self.argmax = argmax\n self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,\n n_chroma=self.n_chroma)), persistent=False)\n self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,\n hop_length=self.winhop, power=2, center=True,\n pad=0, normalized=True)\n\n def forward(self, wav: torch.Tensor) -> torch.Tensor:\n T = wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.nfft:\n pad = self.nfft - T\n r = 0 if pad % 2 == 0 else 1\n wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)\n assert wav.shape[-1] == self.nfft, f\"expected len {self.nfft} but got {wav.shape[-1]}\"\n\n spec = self.spec(wav).squeeze(1)\n raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)\n norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)\n norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')\n\n if self.argmax:\n idx = norm_chroma.argmax(-1, keepdim=True)\n norm_chroma[:] = 0\n norm_chroma.scatter_(dim=-1, index=idx, value=1)\n\n return norm_chroma" }, { "identifier": "ChordExtractor", "path": "audiocraft/modules/chord_chroma.py", "snippet": "class ChordExtractor(nn.Module):\n\n def __init__(self, device, sample_rate, max_duration, chroma_len, n_chroma, winhop):\n super().__init__()\n self.config = HParams.load(\"/src/audiocraft/modules/btc/run_config.yaml\") #gotta specify the path for run_config.yaml of btc\n\n # self.config.feature['large_voca'] = False\n # self.config.model['num_chords'] = 25\n\n self.model_file = '/src/audiocraft/modules/btc/test/btc_model_large_voca.pt'\n # self.model_file = 'audiocraft/modules/btc/test/btc_model.pt'\n self.idx_to_chord = idx2voca_chord()\n self.sr = sample_rate\n\n self.n_chroma = n_chroma\n self.max_duration = max_duration\n self.chroma_len = chroma_len\n self.to_timebin = self.max_duration/self.chroma_len\n self.timebin = winhop\n\n self.chords = chords.Chords()\n self.device = device\n\n self.denoise_window_size = 7\n self.denoise_threshold = 0.5\n \n self.model = BTC_model(config=self.config.model).to(device)\n if os.path.isfile(self.model_file):\n checkpoint = torch.load(self.model_file)\n self.mean = checkpoint['mean']\n self.std = checkpoint['std']\n self.model.load_state_dict(checkpoint['model'])\n\n def forward(self, 
wavs:torch.Tensor) -> torch.Tensor:\n sr = self.config.mp3['song_hz']\n chromas = []\n for wav in wavs:\n original_wav = librosa.resample(wav.cpu().numpy(), orig_sr=self.sr, target_sr=sr)\n original_wav = original_wav.squeeze(0)\n # print(original_wav.shape)\n T = original_wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.timebin//4:\n pad = self.timebin//4 - T\n r = 0 if pad % 2 == 0 else 1\n original_wav = F.pad(torch.Tensor(original_wav), (pad // 2, pad // 2 + r), 'constant', 0)\n original_wav = original_wav.numpy()\n assert original_wav.shape[-1] == self.timebin//4, f\"expected len {self.timebin//4} but got {original_wav.shape[-1]}\"\n # print(original_wav.shape)\n #preprocess\n currunt_sec_hz = 0\n\n while len(original_wav) > currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len']:\n start_idx = int(currunt_sec_hz)\n end_idx = int(currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len'])\n tmp = librosa.cqt(original_wav[start_idx:end_idx], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n if start_idx == 0:\n feature = tmp\n else:\n feature = np.concatenate((feature, tmp), axis=1)\n currunt_sec_hz = end_idx\n \n if currunt_sec_hz == 0:\n feature = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n else:\n tmp = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n feature = np.concatenate((feature, tmp), axis=1)\n # print(feature.shape)\n feature = np.log(np.abs(feature) + 1e-6)\n # print(feature)\n feature_per_second = self.config.mp3['inst_len'] / self.config.model['timestep']\n song_length_second = len(original_wav)/self.config.mp3['song_hz']\n\n feature = feature.T\n feature = (feature - self.mean)/self.std\n\n time_unit = feature_per_second\n n_timestep = self.config.model['timestep']\n\n num_pad = n_timestep - (feature.shape[0] % n_timestep)\n feature = np.pad(feature, ((0, num_pad), (0, 0)), mode=\"constant\", constant_values=0)\n num_instance = feature.shape[0] // n_timestep\n\n #inference\n start_time = 0.0\n lines = []\n with torch.no_grad():\n self.model.eval()\n feature = torch.tensor(feature, dtype=torch.float32).unsqueeze(0).to(self.device)\n for t in range(num_instance):\n self_attn_output, _ = self.model.self_attn_layers(feature[:, n_timestep * t:n_timestep * (t + 1), :])\n prediction, _ = self.model.output_layer(self_attn_output)\n prediction = prediction.squeeze()\n for i in range(n_timestep):\n if t == 0 and i == 0:\n prev_chord = prediction[i].item()\n continue\n if prediction[i].item() != prev_chord:\n lines.append(\n '%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n start_time = time_unit * (n_timestep * t + i)\n prev_chord = prediction[i].item()\n if t == num_instance - 1 and i + num_pad == n_timestep:\n if start_time != time_unit * (n_timestep * t + i):\n lines.append('%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n break\n\n strlines = ''.join(lines)\n\n chroma = []\n\n count = 0\n for line in lines:\n if count >= self.chroma_len: \n break\n 
splits = line.split()\n if len(splits) == 3:\n s = splits[0]\n e = splits[1]\n l = splits[2]\n\n crd = self.chords.chord(l)\n \n if crd[0] == -1:\n multihot = torch.Tensor(crd[2])\n else:\n multihot = torch.concat([torch.Tensor(crd[2])[-crd[0]:],torch.Tensor(crd[2])[:-crd[0]]])\n start_bin = round(float(s)/self.to_timebin)\n end_bin = round(float(e)/self.to_timebin)\n for j in range(start_bin,end_bin):\n if count >= self.chroma_len: \n break\n chroma.append(multihot)\n count += 1\n \n chroma = torch.stack(chroma, dim=0)\n\n # Denoising chroma\n kernel = torch.ones(self.denoise_window_size)/self.denoise_window_size\n\n filtered_signals = []\n for i in range(chroma.shape[-1]):\n filtered_signals.append(torch.nn.functional.conv1d(chroma[...,i].unsqueeze(0),\n kernel.unsqueeze(0).unsqueeze(0).to(chroma.device), \n padding=(self.denoise_window_size - 1) // 2))\n filtered_signals = torch.stack(filtered_signals, dim=-1)\n filtered_signals = filtered_signals > self.denoise_threshold\n\n chromas.append(filtered_signals.squeeze(0))\n \n return torch.stack(chromas, dim=0).to(self.device)" }, { "identifier": "StreamingModule", "path": "audiocraft/modules/streaming.py", "snippet": "class StreamingModule(nn.Module):\n \"\"\"Common API for streaming components.\n\n Each streaming component has a streaming state, which is just a dict[str, Tensor].\n By convention, the first dim of each tensor must be the batch size.\n Don't use dots in the key names, as this would clash with submodules\n (like in state_dict).\n\n If `self._is_streaming` is True, the component should use and remember\n the proper state inside `self._streaming_state`.\n\n To set a streaming component in streaming state, use\n\n with module.streaming():\n ...\n\n This will automatically reset the streaming state when exiting the context manager.\n This also automatically propagates to all streaming children module.\n\n Some module might also implement the `StreamingModule.flush` method, although\n this one is trickier, as all parents module must be StreamingModule and implement\n it as well for it to work properly. See `StreamingSequential` after.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self._streaming_state: State = {}\n self._is_streaming = False\n\n def _apply_named_streaming(self, fn: tp.Any):\n for name, module in self.named_modules():\n if isinstance(module, StreamingModule):\n fn(name, module)\n\n def _set_streaming(self, streaming: bool):\n def _set_streaming(name, module):\n module._is_streaming = streaming\n self._apply_named_streaming(_set_streaming)\n\n @contextmanager\n def streaming(self):\n \"\"\"Context manager to enter streaming mode. 
Reset streaming state on exit.\"\"\"\n self._set_streaming(True)\n try:\n yield\n finally:\n self._set_streaming(False)\n self.reset_streaming()\n\n def reset_streaming(self):\n \"\"\"Reset the streaming state.\"\"\"\n def _reset(name: str, module: StreamingModule):\n module._streaming_state.clear()\n\n self._apply_named_streaming(_reset)\n\n def get_streaming_state(self) -> State:\n \"\"\"Return the streaming state, including that of sub-modules.\"\"\"\n state: State = {}\n\n def _add(name: str, module: StreamingModule):\n if name:\n name += \".\"\n for key, value in module._streaming_state.items():\n state[name + key] = value\n\n self._apply_named_streaming(_add)\n return state\n\n def set_streaming_state(self, state: State):\n \"\"\"Set the streaming state, including that of sub-modules.\"\"\"\n state = dict(state)\n\n def _set(name: str, module: StreamingModule):\n if name:\n name += \".\"\n module._streaming_state.clear()\n for key, value in list(state.items()):\n # complexity is not ideal here, but probably fine.\n if key.startswith(name):\n local_key = key[len(name):]\n if '.' not in local_key:\n module._streaming_state[local_key] = value\n del state[key]\n\n self._apply_named_streaming(_set)\n assert len(state) == 0, list(state.keys())\n\n def flush(self, x: tp.Optional[torch.Tensor] = None):\n \"\"\"Flush any remaining outputs that were waiting for completion.\n Typically, for convolutions, this will add the final padding\n and process the last buffer.\n\n This should take an optional argument `x`, which will be provided\n if a module before this one in the streaming pipeline has already\n spitted out a flushed out buffer.\n \"\"\"\n if x is None:\n return None\n else:\n return self(x)" }, { "identifier": "create_sin_embedding", "path": "audiocraft/modules/transformer.py", "snippet": "def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,\n dtype: torch.dtype = torch.float32) -> torch.Tensor:\n \"\"\"Create sinusoidal positional embedding, with shape `[B, T, C]`.\n\n Args:\n positions (torch.Tensor): LongTensor of positions.\n dim (int): Dimension of the embedding.\n max_period (float): Maximum period of the cosine/sine functions.\n dtype (torch.dtype or str): dtype to use to generate the embedding.\n Returns:\n torch.Tensor: Sinusoidal positional embedding.\n \"\"\"\n # We aim for BTC format\n assert dim % 2 == 0\n half_dim = dim // 2\n positions = positions.to(dtype)\n adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)\n max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point\n phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))\n return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)" }, { "identifier": "audio_read", "path": "audiocraft/data/audio.py", "snippet": "def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,\n duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:\n \"\"\"Read audio by picking the most appropriate backend tool based on the audio format.\n\n Args:\n filepath (str or Path): Path to audio file to read.\n seek_time (float): Time at which to start reading in the file.\n duration (float): Duration to read from the file. 
If set to -1, the whole file is read.\n pad (bool): Pad output audio if not reaching expected duration.\n Returns:\n tuple of torch.Tensor, int: Tuple containing audio data and sample rate.\n \"\"\"\n fp = Path(filepath)\n if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg\n # There is some bug with ffmpeg and reading flac\n info = _soundfile_info(filepath)\n frames = -1 if duration <= 0 else int(duration * info.sample_rate)\n frame_offset = int(seek_time * info.sample_rate)\n wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)\n assert info.sample_rate == sr, f\"Mismatch of sample rates {info.sample_rate} {sr}\"\n wav = torch.from_numpy(wav).t().contiguous()\n if len(wav.shape) == 1:\n wav = torch.unsqueeze(wav, 0)\n else:\n wav, sr = _av_read(filepath, seek_time, duration)\n if pad and duration > 0:\n expected_frames = int(duration * sr)\n wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))\n return wav, sr" }, { "identifier": "SegmentInfo", "path": "audiocraft/data/audio_dataset.py", "snippet": "class SegmentInfo(BaseInfo):\n meta: AudioMeta\n seek_time: float\n # The following values are given once the audio is processed, e.g.\n # at the target sample rate and target number of channels.\n n_frames: int # actual number of frames without padding\n total_frames: int # total number of frames, padding included\n sample_rate: int # actual sample rate\n channels: int # number of audio channels." }, { "identifier": "convert_audio", "path": "audiocraft/data/audio_utils.py", "snippet": "def convert_audio(wav: torch.Tensor, from_rate: float,\n to_rate: float, to_channels: int) -> torch.Tensor:\n \"\"\"Convert audio to new sample rate and number of audio channels.\"\"\"\n wav = julius.resample_frac(wav, int(from_rate), int(to_rate))\n wav = convert_audio_channels(wav, to_channels)\n return wav" }, { "identifier": "AudioCraftEnvironment", "path": "audiocraft/environment.py", "snippet": "class AudioCraftEnvironment:\n \"\"\"Environment configuration for teams and clusters.\n\n AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment\n or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment\n provides pointers to a reference folder resolved automatically across clusters that is shared across team members,\n allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically\n map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.\n\n The cluster type is identified automatically and base configuration file is read from config/teams.yaml.\n Use the following environment variables to specify the cluster, team or configuration:\n\n AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type\n cannot be inferred automatically.\n AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.\n If not set, configuration is read from config/teams.yaml.\n AUDIOCRAFT_TEAM (optional): Name of the team. 
Recommended to set to your own team.\n Cluster configuration are shared across teams to match compute allocation,\n specify your cluster configuration in the configuration file under a key mapping\n your team name.\n \"\"\"\n _instance = None\n DEFAULT_TEAM = \"default\"\n\n def __init__(self) -> None:\n \"\"\"Loads configuration.\"\"\"\n self.team: str = os.getenv(\"AUDIOCRAFT_TEAM\", self.DEFAULT_TEAM)\n cluster_type = _guess_cluster_type()\n cluster = os.getenv(\n \"AUDIOCRAFT_CLUSTER\", cluster_type.value\n )\n logger.info(\"Detecting cluster type %s\", cluster_type)\n\n self.cluster: str = cluster\n\n config_path = os.getenv(\n \"AUDIOCRAFT_CONFIG\",\n Path(__file__)\n .parent.parent.joinpath(\"config/teams\", self.team)\n .with_suffix(\".yaml\"),\n )\n self.config = omegaconf.OmegaConf.load(config_path)\n self._dataset_mappers = []\n cluster_config = self._get_cluster_config()\n if \"dataset_mappers\" in cluster_config:\n for pattern, repl in cluster_config[\"dataset_mappers\"].items():\n regex = re.compile(pattern)\n self._dataset_mappers.append((regex, repl))\n\n def _get_cluster_config(self) -> omegaconf.DictConfig:\n assert isinstance(self.config, omegaconf.DictConfig)\n return self.config[self.cluster]\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n @classmethod\n def reset(cls):\n \"\"\"Clears the environment and forces a reload on next invocation.\"\"\"\n cls._instance = None\n\n @classmethod\n def get_team(cls) -> str:\n \"\"\"Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.\n If not defined, defaults to \"labs\".\n \"\"\"\n return cls.instance().team\n\n @classmethod\n def get_cluster(cls) -> str:\n \"\"\"Gets the detected cluster.\n This value can be overridden by the AUDIOCRAFT_CLUSTER env var.\n \"\"\"\n return cls.instance().cluster\n\n @classmethod\n def get_dora_dir(cls) -> Path:\n \"\"\"Gets the path to the dora directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_DORA_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n dora_dir = os.getenv(\"AUDIOCRAFT_DORA_DIR\", cluster_config[\"dora_dir\"])\n logger.warning(f\"Dora directory: {dora_dir}\")\n return Path(dora_dir)\n\n @classmethod\n def get_reference_dir(cls) -> Path:\n \"\"\"Gets the path to the reference directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return Path(os.getenv(\"AUDIOCRAFT_REFERENCE_DIR\", cluster_config[\"reference_dir\"]))\n\n @classmethod\n def get_slurm_exclude(cls) -> tp.Optional[str]:\n \"\"\"Get the list of nodes to exclude for that cluster.\"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return cluster_config.get(\"slurm_exclude\")\n\n @classmethod\n def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:\n \"\"\"Gets the requested partitions for the current team and cluster as a comma-separated string.\n\n Args:\n partition_types (list[str], optional): partition types to retrieve. Values must be\n from ['global', 'team']. 
If not provided, the global partition is returned.\n \"\"\"\n if not partition_types:\n partition_types = [\"global\"]\n\n cluster_config = cls.instance()._get_cluster_config()\n partitions = [\n cluster_config[\"partitions\"][partition_type]\n for partition_type in partition_types\n ]\n return \",\".join(partitions)\n\n @classmethod\n def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:\n \"\"\"Converts reference placeholder in path with configured reference dir to resolve paths.\n\n Args:\n path (str or Path): Path to resolve.\n Returns:\n Path: Resolved path.\n \"\"\"\n path = str(path)\n\n if path.startswith(\"//reference\"):\n reference_dir = cls.get_reference_dir()\n logger.warn(f\"Reference directory: {reference_dir}\")\n assert (\n reference_dir.exists() and reference_dir.is_dir()\n ), f\"Reference directory does not exist: {reference_dir}.\"\n path = re.sub(\"^//reference\", str(reference_dir), path)\n\n return Path(path)\n\n @classmethod\n def apply_dataset_mappers(cls, path: str) -> str:\n \"\"\"Applies dataset mapping regex rules as defined in the configuration.\n If no rules are defined, the path is returned as-is.\n \"\"\"\n instance = cls.instance()\n\n for pattern, repl in instance._dataset_mappers:\n path = pattern.sub(repl, path)\n\n return path" }, { "identifier": "ResidualVectorQuantizer", "path": "audiocraft/quantization/vq.py", "snippet": "class ResidualVectorQuantizer(BaseQuantizer):\n \"\"\"Residual Vector Quantizer.\n\n Args:\n dimension (int): Dimension of the codebooks.\n n_q (int): Number of residual vector quantizers used.\n q_dropout (bool): Random quantizer drop out at train time.\n bins (int): Codebook size.\n decay (float): Decay for exponential moving average over the codebooks.\n kmeans_init (bool): Whether to use kmeans to initialize the codebooks.\n kmeans_iters (int): Number of iterations used for kmeans initialization.\n threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes\n that have an exponential moving average cluster size less than the specified threshold with\n randomly selected vector from the current batch.\n orthogonal_reg_weight (float): Orthogonal regularization weights.\n orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.\n orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.\n for orthogonal regularization.\n \"\"\"\n def __init__(\n self,\n dimension: int = 256,\n n_q: int = 8,\n q_dropout: bool = False,\n bins: int = 1024,\n decay: float = 0.99,\n kmeans_init: bool = True,\n kmeans_iters: int = 10,\n threshold_ema_dead_code: int = 2,\n orthogonal_reg_weight: float = 0.0,\n orthogonal_reg_active_codes_only: bool = False,\n orthogonal_reg_max_codes: tp.Optional[int] = None,\n ):\n super().__init__()\n self.max_n_q = n_q\n self.n_q = n_q\n self.q_dropout = q_dropout\n self.dimension = dimension\n self.bins = bins\n self.decay = decay\n self.kmeans_init = kmeans_init\n self.kmeans_iters = kmeans_iters\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.orthogonal_reg_weight = orthogonal_reg_weight\n self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only\n self.orthogonal_reg_max_codes = orthogonal_reg_max_codes\n self.vq = ResidualVectorQuantization(\n dim=self.dimension,\n codebook_size=self.bins,\n num_quantizers=self.n_q,\n decay=self.decay,\n kmeans_init=self.kmeans_init,\n kmeans_iters=self.kmeans_iters,\n threshold_ema_dead_code=self.threshold_ema_dead_code,\n orthogonal_reg_weight=self.orthogonal_reg_weight,\n orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,\n orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,\n channels_last=False\n )\n\n def forward(self, x: torch.Tensor, frame_rate: int):\n n_q = self.n_q\n if self.training and self.q_dropout:\n n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())\n bw_per_q = math.log2(self.bins) * frame_rate / 1000\n quantized, codes, commit_loss = self.vq(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n bw = torch.tensor(n_q * bw_per_q).to(x)\n return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))\n\n def encode(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode a given input tensor with the specified frame rate at the given bandwidth.\n The RVQ encode method sets the appropriate number of quantizer to use\n and returns indices for each quantizer.\n \"\"\"\n n_q = self.n_q\n codes = self.vq.encode(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n return codes\n\n def decode(self, codes: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the given codes to the quantized representation.\"\"\"\n # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].\n codes = codes.transpose(0, 1)\n quantized = self.vq.decode(codes)\n return quantized\n\n @property\n def total_codebooks(self):\n return self.max_n_q\n\n @property\n def num_codebooks(self):\n return self.n_q\n\n def set_num_codebooks(self, n: int):\n assert n > 0 and n <= self.max_n_q\n self.n_q = n" }, { "identifier": "TorchAutocast", "path": "audiocraft/utils/autocast.py", "snippet": "class TorchAutocast:\n \"\"\"TorchAutocast utility class.\n Allows you to enable and disable autocast. 
This is specially useful\n when dealing with different architectures and clusters with different\n levels of support.\n\n Args:\n enabled (bool): Whether to enable torch.autocast or not.\n args: Additional args for torch.autocast.\n kwargs: Additional kwargs for torch.autocast\n \"\"\"\n def __init__(self, enabled: bool, *args, **kwargs):\n self.autocast = torch.autocast(*args, **kwargs) if enabled else None\n\n def __enter__(self):\n if self.autocast is None:\n return\n try:\n self.autocast.__enter__()\n except RuntimeError:\n device = self.autocast.device\n dtype = self.autocast.fast_dtype\n raise RuntimeError(\n f\"There was an error autocasting with dtype={dtype} device={device}\\n\"\n \"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16\"\n )\n\n def __exit__(self, *args, **kwargs):\n if self.autocast is None:\n return\n self.autocast.__exit__(*args, **kwargs)" }, { "identifier": "EmbeddingCache", "path": "audiocraft/utils/cache.py", "snippet": "class EmbeddingCache:\n \"\"\"Cache around embeddings computation for faster execution.\n The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API\n to retrieve the pre-computed embeddings on full inputs and extract only a given chunk\n using a user-provided function. When the cache is warm (all embeddings are pre-computed),\n the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.\n Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint\n and synchronization points in the forward calls.\n\n Args:\n cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.\n device (str or torch.device): Device on which the embedding is returned.\n compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute\n the embedding from a given object and path. This user provided function can compute the\n embedding from the provided object or using the provided path as entry point. The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract\n the desired embedding chunk from the full embedding loaded from the cache. 
The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n If not specified, will return the full embedding unmodified.\n \"\"\"\n def __init__(self, cache_path: tp.Union[str, Path], device: tp.Union[str, torch.device],\n compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],\n extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):\n self.cache_path = Path(cache_path)\n self.device = device\n self._compute_embed_fn = compute_embed_fn\n self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]\n if extract_embed_fn is not None:\n self._extract_embed_fn = extract_embed_fn\n else:\n self._extract_embed_fn = partial(get_full_embed, device=device)\n if self.cache_path is not None:\n self.cache_path.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Cache instantiated at: {self.cache_path}\")\n self.pool = ThreadPoolExecutor(8)\n self.pool.__enter__()\n self._current_batch_cache: dict = {}\n self._memory_cache: dict = {}\n\n def _get_cache_path(self, path: tp.Union[Path, str]):\n \"\"\"Get cache path for the given file path.\"\"\"\n sig = sha1(str(path).encode()).hexdigest()\n return self.cache_path / sig\n\n @staticmethod\n def _get_full_embed_from_cache(cache: Path):\n \"\"\"Loads full pre-computed embedding from the cache.\"\"\"\n try:\n embed = torch.load(cache, 'cpu')\n except Exception as exc:\n logger.error(\"Error loading %s: %r\", cache, exc)\n embed = None\n return embed\n\n def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:\n \"\"\"Get embedding from cache, computing and storing it to cache if not already cached.\n The EmbeddingCache first tries to load the embedding from the in-memory cache\n containing the pre-computed chunks populated through `populate_embed_cache`.\n If not found, the full embedding is computed and stored on disk to be later accessed\n to populate the in-memory cache, and the desired embedding chunk is extracted and returned.\n\n Args:\n paths (list[Path or str]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n embeds = []\n for idx, path in enumerate(paths):\n cache = self._get_cache_path(path)\n if cache in self._current_batch_cache:\n embed = self._current_batch_cache[cache]\n else:\n full_embed = self._compute_embed_fn(path, x, idx)\n try:\n with flashy.utils.write_and_rename(cache, pid=True) as f:\n torch.save(full_embed.cpu(), f)\n except Exception as exc:\n logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)\n else:\n logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)\n embed = self._extract_embed_fn(full_embed, x, idx)\n embeds.append(embed)\n embed = torch.stack(embeds, dim=0)\n return embed\n\n def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:\n \"\"\"Populate in-memory caches for embeddings reading from the embeddings stored on disk.\n The in-memory caches consist in a cache for the full embedding and another cache for the\n final embedding chunk. 
Such caches are used to limit the IO access when computing the actual embeddings\n and reduce the IO footprint and synchronization points during forward passes.\n\n Args:\n paths (list[Path]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n self._current_batch_cache.clear()\n if self.cache_path is not None:\n futures: list = []\n for path in paths:\n assert path is not None, \"Path is required for computation from cache\"\n cache = self._get_cache_path(path)\n if cache in self._memory_cache or not cache.exists():\n futures.append(None)\n else:\n futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))\n for idx, (path, future) in enumerate(zip(paths, futures)):\n assert path is not None\n cache = self._get_cache_path(path)\n full_embed = None\n if future is None:\n if cache in self._memory_cache:\n full_embed = self._memory_cache[cache]\n else:\n full_embed = future.result()\n if full_embed is not None:\n self._memory_cache[cache] = full_embed\n full_embed = full_embed.to(self.device)\n if full_embed is not None:\n embed = self._extract_embed_fn(full_embed, x, idx)\n self._current_batch_cache[cache] = embed" }, { "identifier": "collate", "path": "audiocraft/utils/utils.py", "snippet": "def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Get a list of tensors and collate them to a single tensor. according to the following logic:\n - `dim` specifies the time dimension which will be stacked and padded.\n - The output will contain 1 new dimension (dimension index 0) which will be the size of\n of the original list.\n\n Args:\n tensors (tp.List[torch.Tensor]): List of tensors to collate.\n dim (int): Dimension which will be stacked and padded.\n Returns:\n tp.Tuple[torch.Tensor, torch.Tensor]:\n torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension\n (dimension index 0) which will be the size of the original list.\n torch.Tensor: Tensor containing length of original tensor sizes (without padding).\n \"\"\"\n tensors = [x.transpose(0, dim) for x in tensors]\n lens = torch.LongTensor([len(x) for x in tensors])\n padded_tensors = pad_sequence(tensors)\n padded_tensors = padded_tensors.transpose(0, 1)\n padded_tensors = padded_tensors.transpose(1, dim + 1)\n return padded_tensors, lens" }, { "identifier": "hash_trick", "path": "audiocraft/utils/utils.py", "snippet": "def hash_trick(word: str, vocab_size: int) -> int:\n \"\"\"Hash trick to pair each word with an index\n\n Args:\n word (str): word we wish to convert to an index\n vocab_size (int): size of the vocabulary\n Returns:\n int: index of the word in the embedding LUT\n \"\"\"\n hash = int(hashlib.sha256(word.encode(\"utf-8\")).hexdigest(), 16)\n return hash % vocab_size" }, { "identifier": "length_to_mask", "path": "audiocraft/utils/utils.py", "snippet": "def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:\n \"\"\"Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).\n For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]\n\n Args:\n lengths (torch.Tensor): tensor with lengths\n max_len (int): can set the max length manually. 
Defaults to None.\n Returns:\n torch.Tensor: mask with 0s where there is pad tokens else 1s\n \"\"\"\n assert len(lengths.shape) == 1, \"Length shape should be 1 dimensional.\"\n final_length = lengths.max().item() if not max_len else max_len\n final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor\n return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]" }, { "identifier": "load_clap_state_dict", "path": "audiocraft/utils/utils.py", "snippet": "def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):\n \"\"\"Wrapper around state dict loading of CLAP model\n addressing compatibility issues between CLAP and AudioCraft\n HuggingFace transformer version.\n See: https://github.com/LAION-AI/CLAP/issues/118\n \"\"\"\n from clap_module.factory import load_state_dict # type: ignore\n pkg = load_state_dict(path)\n pkg.pop('text_branch.embeddings.position_ids', None)\n clap_model.model.load_state_dict(pkg)" }, { "identifier": "warn_once", "path": "audiocraft/utils/utils.py", "snippet": "@lru_cache(None)\ndef warn_once(logger, msg):\n \"\"\"Warn about a given message only once.\"\"\"\n logger.warning(msg)" }, { "identifier": "chords", "path": "audiocraft/modules/btc/utils/chords.py", "snippet": "def chords(self, labels):\n\n \"\"\"\n Transform a list of chord labels into an array of internal numeric\n representations.\n\n Parameters\n ----------\n labels : list\n List of chord labels (str).\n\n Returns\n -------\n chords : numpy.array\n Structured array with columns 'root', 'bass', and 'intervals',\n containing a numeric representation of chords.\n\n \"\"\"\n crds = np.zeros(len(labels), dtype=CHORD_DTYPE)\n cache = {}\n for i, lbl in enumerate(labels):\n cv = cache.get(lbl, None)\n if cv is None:\n cv = self.chord(lbl)\n cache[lbl] = cv\n crds[i] = cv\n\n return crds" } ]
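The `ChromaExtractor` shown at the top of this context list is the lowest-level piece of the chroma/chord conditioning stack. A minimal usage sketch follows, assuming the repository's `audiocraft` package (and its librosa/torchaudio dependencies) is importable; the one-second random waveform is just a stand-in for real audio.

```python
import torch
from audiocraft.modules.chroma import ChromaExtractor

# 12-bin chroma at 32 kHz with a 4096-sample STFT window (radix2_exp=12),
# quantized to one-hot frames via the snippet's argmax branch.
extractor = ChromaExtractor(sample_rate=32000, n_chroma=12, radix2_exp=12, argmax=True)

wav = torch.randn(2, 1, 32000)   # [B, C, T]: batch of 2, mono, one second
chroma = extractor(wav)          # -> [B, T_frames, 12] one-hot chroma frames
print(chroma.shape)
```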
from collections import defaultdict from copy import deepcopy from dataclasses import dataclass, field from itertools import chain from pathlib import Path from num2words import num2words from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore from torch import nn from torch.nn.utils.rnn import pad_sequence from .chroma import ChromaExtractor from .chord_chroma import ChordExtractor from .streaming import StreamingModule from .transformer import create_sin_embedding from ..data.audio import audio_read from ..data.audio_dataset import SegmentInfo from ..data.audio_utils import convert_audio from ..environment import AudioCraftEnvironment from ..quantization import ResidualVectorQuantizer from ..utils.autocast import TorchAutocast from ..utils.cache import EmbeddingCache from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once from .btc.utils import chords from demucs import pretrained from audiocraft.data.audio_dataset import AudioDataset from demucs.apply import apply_model from demucs.audio import convert_audio from demucs import pretrained from audiocraft.data.audio_dataset import AudioDataset from demucs.apply import apply_model from demucs.audio import convert_audio import logging import math import random import re import typing as tp import warnings import einops import spacy import torch import torch.nn.functional as F import numpy as np import laion_clap # type: ignore
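The import block above pulls in DEMUCS for source separation; the conditioners defined in this file use it to strip the drum stem before chroma/chord extraction. A rough sketch of that stem-selection step, using only calls that also appear in the code below (the silent waveform is just a stand-in input):

import torch
from demucs import pretrained
from demucs.apply import apply_model

demucs = pretrained.get_model('htdemucs')
# Keep the melody-carrying stems, as ChromaChordConditioner does further below.
stem_indices = torch.LongTensor([demucs.sources.index(s) for s in ('vocals', 'bass', 'other')])

wav = torch.zeros(1, demucs.audio_channels, demucs.samplerate)  # one second of silence
stems = apply_model(demucs, wav, device='cpu')  # [B, n_stems, C, T]
mix_wav = stems[:, stem_indices].sum(1)         # merge the selected stems into one waveform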
13982
return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. 
The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device)
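The match_len_on_eval branch in _get_wav_embedding above truncates or tiles the chroma along time so that every conditioning sequence has exactly chroma_len frames. A standalone sketch of just that reshaping step (the function name here is illustrative, not part of the file):

import math
import torch

def match_chroma_len(chroma: torch.Tensor, chroma_len: int) -> torch.Tensor:
    """Truncate or tile a [B, T, C] chroma tensor to exactly `chroma_len` frames,
    mirroring the match_len_on_eval branch above."""
    B, T, C = chroma.shape
    if T >= chroma_len:
        return chroma[:, :chroma_len]
    n_repeat = int(math.ceil(chroma_len / T))
    return chroma.repeat(1, n_repeat, 1)[:, :chroma_len]

# e.g. a 3-frame chroma tiled up to 8 frames
out = match_chroma_len(torch.randn(1, 3, 12), chroma_len=8)
assert out.shape == (1, 8, 12)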
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. 
""" def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device)
self.chords = chords.Chords()
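next_line above is the gold continuation of the cropped_code field: it is the statement that immediately follows the ChromaChordConditioner.__init__ body shown there. As an illustration of how such a record might be consumed, here is a sketch that assembles a next-line completion prompt from the record's own fields; the field names come from this dataset's schema, while the prompt layout and function name are assumptions:

def build_completion_prompt(record: dict) -> tuple[str, str]:
    """Assemble a repo-level next-line completion example from one record.

    Cross-file `context` snippets come first, then the in-file
    `import_statement` and `cropped_code`; the model is expected to
    produce `next_line`. The layout is illustrative, not prescribed
    by the dataset.
    """
    context_block = "\n\n".join(
        f"# {snippet['path']}\n{snippet['snippet']}" for snippet in record["context"]
    )
    prompt = f"{context_block}\n\n{record['import_statement']}\n\n{record['cropped_code']}\n"
    target = record["next_line"]  # e.g. 'self.chords = chords.Chords()'
    return prompt, target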
16
2023-10-09 09:52:24+00:00
16k
RVC-Project/Retrieval-based-Voice-Conversion
rvc/modules/vc/modules.py
[ { "identifier": "Config", "path": "rvc/configs/config.py", "snippet": "class Config:\n def __new__(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n self.device: str = \"cuda:0\"\n self.is_half: bool = True\n self.use_jit: bool = False\n self.n_cpu: int = cpu_count()\n self.gpu_name: str | None = None\n self.json_config = self.load_config_json()\n self.gpu_mem: int | None = None\n self.instead: str | None = None\n (\n self.python_cmd,\n self.listen_port,\n self.noparallel,\n self.noautoopen,\n self.dml,\n ) = self.arg_parse()\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n @staticmethod\n def load_config_json() -> dict:\n return {\n config_file: json.load(open(config_file, \"r\"))\n for config_file in version_config_list\n }\n\n @staticmethod\n def arg_parse() -> tuple:\n parser: argparse.ArgumentParser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=7865, help=\"Listen port\")\n parser.add_argument(\n \"--pycmd\",\n type=str,\n default=sys.executable or \"python\",\n help=\"Python command\",\n )\n parser.add_argument(\n \"--noparallel\", action=\"store_true\", help=\"Disable parallel processing\"\n )\n parser.add_argument(\n \"--noautoopen\",\n action=\"store_true\",\n help=\"Do not open in browser automatically\",\n )\n parser.add_argument(\n \"--dml\",\n action=\"store_true\",\n help=\"torch_dml\",\n )\n cmd_opts: argparse.Namespace = parser.parse_args()\n\n cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865\n\n return (\n cmd_opts.pycmd,\n cmd_opts.port,\n cmd_opts.noparallel,\n cmd_opts.noautoopen,\n cmd_opts.dml,\n )\n\n @staticmethod\n def has_mps() -> bool:\n return torch.backends.mps.is_available() and not torch.zeros(1).to(\n torch.device(\"mps\")\n )\n\n @staticmethod\n def has_xpu() -> bool:\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()\n\n def use_fp32_config(self) -> None:\n for config_file, data in self.json_config.items():\n try:\n data[\"train\"][\"fp16_run\"] = False\n with open(config_file, \"w\") as json_file:\n json.dump(data, json_file, indent=4)\n except Exception as e:\n logger.info(f\"Error updating {config_file}: {str(e)}\")\n logger.info(\"overwrite configs.json\")\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n i_device = int(self.device.split(\":\")[-1])\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n logger.info(f\"Found GPU {self.gpu_name}, force to fp32\")\n self.is_half = False\n self.use_fp32_config()\n else:\n logger.info(f\"Found GPU {self.gpu_name}\")\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n elif self.has_mps():\n logger.info(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n elif self.dml:\n import torch_directml\n\n self.device = torch_directml.device(torch_directml.default_device())\n self.is_half = False\n else:\n logger.info(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if 
self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n elif self.is_half:\n # 6G PU_RAM conf\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n # 5G GPU_RAM conf\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n logger.info(f\"Use {self.dml or self.instead} instead\")\n logger.info(f\"is_half:{self.is_half}, device:{self.device}\")\n return x_pad, x_query, x_center, x_max" }, { "identifier": "load_audio", "path": "rvc/lib/audio.py", "snippet": "def load_audio(file, sr):\r\n if not os.path.exists(file):\r\n raise RuntimeError(\r\n \"You input a wrong audio path that does not exists, please fix it!\"\r\n )\r\n try:\r\n with open(file, \"rb\") as f:\r\n with BytesIO() as out:\r\n audio2(f, out, \"f32le\", sr)\r\n return np.frombuffer(out.getvalue(), np.float32).flatten()\r\n\r\n except AttributeError:\r\n audio = file[1] / 32768.0\r\n if len(audio.shape) == 2:\r\n audio = np.mean(audio, -1)\r\n return librosa.resample(audio, orig_sr=file[0], target_sr=16000)\r\n\r\n except Exception:\r\n raise RuntimeError(traceback.format_exc())\r" }, { "identifier": "wav2", "path": "rvc/lib/audio.py", "snippet": "def wav2(i, o, format):\r\n inp = av.open(i, \"rb\")\r\n if format == \"m4a\":\r\n format = \"mp4\"\r\n out = av.open(o, \"wb\", format=format)\r\n if format == \"ogg\":\r\n format = \"libvorbis\"\r\n if format == \"mp4\":\r\n format = \"aac\"\r\n\r\n ostream = out.add_stream(format)\r\n\r\n for frame in inp.decode(audio=0):\r\n for p in ostream.encode(frame):\r\n out.mux(p)\r\n\r\n for p in ostream.encode(None):\r\n out.mux(p)\r\n\r\n out.close()\r\n inp.close()\r" }, { "identifier": "SynthesizerTrnMs256NSFsid", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, 
hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n pitchf: torch.Tensor,\n y: torch.Tensor,\n y_lengths: torch.Tensor,\n ds: Optional[torch.Tensor] = None,\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n assert isinstance(rate, torch.Tensor)\n head = int(z_p.shape[2] * (1 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n 
):\n super(SynthesizerTrnMs256NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * 
torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n 
torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", 
self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "Pipeline", "path": "rvc/modules/vc/pipeline.py", "snippet": "class Pipeline(object):\n def __init__(self, tgt_sr, config):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len 
- len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n model = \"full\"\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n elif f0_method == \"rmvpe\":\n if not hasattr(self, \"model_rmvpe\"):\n from rvc.lib.rmvpe import RMVPE\n\n logger.info(\n \"Loading rmvpe model,%s\" % \"%s/rmvpe.pt\" % os.environ[\"rmvpe_root\"]\n )\n self.model_rmvpe = RMVPE(\n \"%s/rmvpe.pt\" % os.environ[\"rmvpe_root\"],\n is_half=self.is_half,\n device=self.device,\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n if \"privateuseone\" in str(self.device): # clean ortruntime memory\n del self.model_rmvpe.model\n del self.model_rmvpe\n logger.info(\"Cleaning ortruntime memory\")\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int32)\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = feats.clone()\n if (\n not isinstance(index, type(None))\n and not isinstance(big_npy, type(None))\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n 
weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch is not None and pitchf is not None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch is not None and pitchf is not None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n hasp = pitch is not None and pitchf is not None\n arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)\n audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()\n del hasp, arg\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[\"npy\"] += t1 - t0\n times[\"infer\"] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n f0_file=None,\n ):\n if (\n file_index\n and file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index)\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += np.abs(audio_pad[i : i - self.window])\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n audio_sum[t - self.t_query : t + self.t_query]\n == audio_sum[t - self.t_query : t + self.t_query].min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if \"mps\" not in str(self.device) or \"xpu\" not in str(self.device):\n pitchf = 
pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[\"f0\"] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if tgt_sr != resample_sr >= 16000:\n audio_opt = librosa.resample(\n audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
import logging import os import traceback import numpy as np import soundfile as sf import torch from collections import OrderedDict from io import BytesIO from pathlib import Path from rvc.configs.config import Config from rvc.lib.audio import load_audio, wav2 from rvc.lib.infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from rvc.modules.vc.pipeline import Pipeline from rvc.modules.vc.utils import *
13,121
self.net_g = ( self.net_g.half() if self.config.is_half else self.net_g.float() ) self.pipeline = Pipeline(self.tgt_sr, self.config) self.n_spk = self.cpt["config"][-3] index = get_index_path_from_model(sid) logger.info("Select index: " + index) return self.n_spk, return_protect, index def vc_single( self, sid: int, input_audio_path: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, hubert_path: str | None = None, ): hubert_path = os.getenv("hubert_path") if not hubert_path else hubert_path try: audio = load_audio(input_audio_path, 16000) audio_max = np.abs(audio).max() / 0.95 if audio_max > 1: audio /= audio_max times = {"npy": 0, "f0": 0, "infer": 0} if self.hubert_model is None: self.hubert_model = load_hubert(self.config, hubert_path) audio_opt = self.pipeline.pipeline( self.hubert_model, self.net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, index_file, index_rate, self.if_f0, filter_radius, self.tgt_sr, resample_sr, rms_mix_rate, self.version, protect, f0_file, ) tgt_sr = resample_sr if self.tgt_sr != resample_sr >= 16000 else self.tgt_sr return tgt_sr, audio_opt, times, None except Exception: info = traceback.format_exc() logger.warning(info) return None, None, None, info def vc_multi( self, sid: int, paths: list, opt_root: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, output_format: str = "wav", hubert_path: str | None = None, ): try: os.makedirs(opt_root, exist_ok=True) paths = [path.name for path in paths] infos = [] for path in paths: tgt_sr, audio_opt, _, info = self.vc_single( sid, Path(path), f0_up_key, f0_method, f0_file, index_file, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, hubert_path, ) if info: try: if output_format in ["wav", "flac"]: sf.write( f"{opt_root}/{os.path.basename(path)}.{output_format}", audio_opt, tgt_sr, ) else: with BytesIO() as wavf: sf.write(wavf, audio_opt, tgt_sr, format="wav") wavf.seek(0, 0) with open( f"{opt_root}/{os.path.basename(path)}.{output_format}", "wb", ) as outf:
logger: logging.Logger = logging.getLogger(__name__) class VC: def __init__(self): self.n_spk: any = None self.tgt_sr: int | None = None self.net_g = None self.pipeline: Pipeline | None = None self.cpt: OrderedDict | None = None self.version: str | None = None self.if_f0: int | None = None self.version: str | None = None self.hubert_model: any = None self.config = Config() def get_vc(self, sid: str, *to_return_protect: int): logger.info("Get sid: " + sid) return_protect = [ to_return_protect[0] if self.if_f0 != 0 and to_return_protect else 0.5, to_return_protect[1] if self.if_f0 != 0 and to_return_protect else 0.33, ] person = f'{os.getenv("weight_root")}/{sid}' logger.info(f"Loading: {person}") self.cpt = torch.load(person, map_location="cpu") self.tgt_sr = self.cpt["config"][-1] self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk self.if_f0 = self.cpt.get("f0", 1) self.version = self.cpt.get("version", "v1") synthesizer_class = { ("v1", 1): SynthesizerTrnMs256NSFsid, ("v1", 0): SynthesizerTrnMs256NSFsid_nono, ("v2", 1): SynthesizerTrnMs768NSFsid, ("v2", 0): SynthesizerTrnMs768NSFsid_nono, } self.net_g = synthesizer_class.get( (self.version, self.if_f0), SynthesizerTrnMs256NSFsid )(*self.cpt["config"], is_half=self.config.is_half) del self.net_g.enc_q if sid == "" or []: logger.info("Clean model cache") del (self.hubert_model, self.tgt_sr, self.net_g) (self.net_g) = self.n_spk = index = None else: self.net_g.load_state_dict(self.cpt["weight"], strict=False) self.net_g.eval().to(self.config.device) self.net_g = ( self.net_g.half() if self.config.is_half else self.net_g.float() ) self.pipeline = Pipeline(self.tgt_sr, self.config) self.n_spk = self.cpt["config"][-3] index = get_index_path_from_model(sid) logger.info("Select index: " + index) return self.n_spk, return_protect, index def vc_single( self, sid: int, input_audio_path: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, hubert_path: str | None = None, ): hubert_path = os.getenv("hubert_path") if not hubert_path else hubert_path try: audio = load_audio(input_audio_path, 16000) audio_max = np.abs(audio).max() / 0.95 if audio_max > 1: audio /= audio_max times = {"npy": 0, "f0": 0, "infer": 0} if self.hubert_model is None: self.hubert_model = load_hubert(self.config, hubert_path) audio_opt = self.pipeline.pipeline( self.hubert_model, self.net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, index_file, index_rate, self.if_f0, filter_radius, self.tgt_sr, resample_sr, rms_mix_rate, self.version, protect, f0_file, ) tgt_sr = resample_sr if self.tgt_sr != resample_sr >= 16000 else self.tgt_sr return tgt_sr, audio_opt, times, None except Exception: info = traceback.format_exc() logger.warning(info) return None, None, None, info def vc_multi( self, sid: int, paths: list, opt_root: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, output_format: str = "wav", hubert_path: str | None = None, ): try: os.makedirs(opt_root, exist_ok=True) paths = [path.name for path in paths] infos = [] for path in paths: tgt_sr, audio_opt, _, info = self.vc_single( sid, Path(path), f0_up_key, f0_method, f0_file, index_file, index_rate, filter_radius, 
resample_sr, rms_mix_rate, protect, hubert_path, ) if info: try: if output_format in ["wav", "flac"]: sf.write( f"{opt_root}/{os.path.basename(path)}.{output_format}", audio_opt, tgt_sr, ) else: with BytesIO() as wavf: sf.write(wavf, audio_opt, tgt_sr, format="wav") wavf.seek(0, 0) with open( f"{opt_root}/{os.path.basename(path)}.{output_format}", "wb", ) as outf:
wav2(wavf, outf, output_format)
2
2023-10-14 09:52:31+00:00
16k
zhijie-group/LOVECon
video_diffusion/pipelines/stable_diffusion_controlnet.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n mid_block_type: str = \"UNetMidBlockPseudo3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n **kwargs\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n \n # input\n self.conv_in = PseudoConv3d(in_channels, block_out_channels[0], \n kernel_size=3, padding=(1, 1), model_config=kwargs)\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n kwargs_copy=copy.deepcopy(kwargs)\n temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n and (not is_final_block))\n kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n # kwargs_copy.update({'SparseCausalAttention_index': temporal_downsample_i} )\n if temporal_downsample_i:\n print(f'Initialize model temporal downsample at layer {i}')\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n # mid\n if mid_block_type == \"UNetMidBlockPseudo3DCrossAttn\":\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n model_config=kwargs\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n \n kwargs_copy=copy.deepcopy(kwargs)\n kwargs_copy.update({'temporal_downsample': \n i < (self.temporal_downsample_time-1)})\n if i < (self.temporal_downsample_time-1):\n print(f'Initialize model temporal updample at layer {i}')\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n self.conv_out = PseudoConv3d(block_out_channels[0], out_channels, \n kernel_size=3, padding=1, model_config=kwargs)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input 
tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = (\n num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n )\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(\n module,\n (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D, CrossAttnUpBlockPseudo3D, UpBlockPseudo3D),\n ):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None, # None\n attention_mask: Optional[torch.Tensor] = None, # None\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNetPseudo3DConditionOutput, Tuple]:\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when 
sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None: # None\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample: # False\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n new_down_block_res_samples += (down_block_res_sample + down_block_additional_residual,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n # for i in down_block_res_samples: print(i.shape) \n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n \n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNetPseudo3DConditionOutput(sample=sample)\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n config[\"up_block_types\"] = [convert_2d_to_3d_block(block) for block in config[\"up_block_types\"]]\n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n 
continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ControlNetPseudo3DModel", "path": "video_diffusion/models/controlnet_3d_condition.py", "snippet": "class ControlNetPseudo3DModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n **kwargs\n ):\n super().__init__()\n\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n # self.conv_in = PseudoConv3d(\n # in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n # )\n self.conv_in = InflatedConv3d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetPseudo3DConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n #non temperal \n # kwargs_copy=copy.deepcopy(kwargs)\n # temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n # and (not is_final_block))\n # kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n 
cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n # model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n # controlnet_block = PseudoConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n # model_config=kwargs\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetPseudo3DOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n \n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb)\n\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n # print(sample.shape,controlnet_cond.shape)\n sample += controlnet_cond\n \n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n\n # 5. 
Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetPseudo3DOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, control_temporal_idx=None, control_mid_temporal=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\"\n ]\n # config[\"control_temporal_idx\"] = control_temporal_idx\n # config[\"control_mid_temporal\"] = control_mid_temporal\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n\n state_dict = torch.load(model_file, map_location=\"cpu\")\n for k, v in model.state_dict().items():\n if '_temp.' 
in k:\n if 'conv' in k:\n state_dict.update({k: v})\n else:\n copyk = k\n copyk = copyk.replace('_temp.', '1.')\n state_dict.update({k: state_dict[copyk]})\n model.load_state_dict(state_dict)\n\n return model\n\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n \n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "attention_util", "path": "video_diffusion/prompt_attention/attention_util.py", "snippet": "class EmptyControl:\nclass AttentionControlEdit(AttentionStore, abc.ABC):\nclass AttentionReplace(AttentionControlEdit):\nclass AttentionRefine(AttentionControlEdit):\nclass AttentionReweight(AttentionControlEdit):\n def step_callback(self, x_t):\n def between_steps(self):\n def __call__(self, attn, is_cross: bool, place_in_unet: str):\n def step_callback(self, x_t):\n def replace_self_attention(self, attn_base, att_replace, reshaped_mask=None):\n def replace_cross_attention(self, attn_base, att_replace):\n def update_attention_position_dict(self, current_attention_key):\n def forward(self, attn, is_cross: bool, place_in_unet: str):\n def between_steps(self):\n def __init__(self, prompts, num_steps: int,\n cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],\n self_replace_steps: Union[float, Tuple[float, float]],\n latent_blend: Optional[SpatialBlender], tokenizer=None, \n additional_attention_store: AttentionStore =None,\n use_inversion_attention: bool=False,\n attention_blend: SpatialBlender= None,\n save_self_attention: bool=True,\n disk_store=False\n ):\n def replace_cross_attention(self, attn_base, att_replace):\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,\n latent_blend: Optional[SpatialBlender] = None, tokenizer=None,\n additional_attention_store=None,\n use_inversion_attention = False,\n attention_blend: SpatialBlender=None,\n 
save_self_attention: bool = True,\n disk_store=False):\n def replace_cross_attention(self, attn_base, att_replace):\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,\n latent_blend: Optional[SpatialBlender] = None, tokenizer=None,\n additional_attention_store=None,\n use_inversion_attention = False,\n attention_blend: SpatialBlender=None,\n save_self_attention : bool=True,\n disk_store = False\n ):\n def replace_cross_attention(self, attn_base, att_replace):\n def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer,\n latent_blend: Optional[SpatialBlender] = None, controller: Optional[AttentionControlEdit] = None, tokenizer=None,\n additional_attention_store=None,\n use_inversion_attention = False,\n attention_blend: SpatialBlender=None,\n save_self_attention:bool = True,\n disk_store = False\n ):\ndef get_equalizer(text: str, word_select: Union[int, Tuple[int, ...]], values: Union[List[float],\n Tuple[float, ...]], tokenizer=None):\ndef make_controller(tokenizer, prompts: List[str], is_replace_controller: bool,\n cross_replace_steps: Dict[str, float], self_replace_steps: float=0.0, \n blend_words=None, equilizer_params=None, \n additional_attention_store=None, use_inversion_attention = False, blend_th: float=(0.3, 0.3),\n NUM_DDIM_STEPS=None,\n blend_latents = False,\n blend_self_attention=False,\n save_path = None,\n save_self_attention = True,\n disk_store = False\n ) -> AttentionControlEdit:" } ]
import inspect import os, sys import PIL import torch import numpy as np import json import diffusers import bitsandbytes from dataclasses import dataclass from typing import Callable, List, Optional, Union,Dict,Any from einops import rearrange from tqdm import trange, tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ..models.unet_3d_condition import UNetPseudo3DConditionModel from ..models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.prompt_attention import attention_util from accelerate import cpu_offload
11,273
# code mostly taken from https://github.com/huggingface/diffusers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpatioTemporalStableDiffusionControlnetPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion. """ _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# code mostly taken from https://github.com/huggingface/diffusers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpatioTemporalStableDiffusionControlnetPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion. """ _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNetPseudo3DConditionModel,
0
2023-10-09 14:38:28+00:00
16k
mlpc-ucsd/MaskCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "maskclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # add MaskCLIP configs\n cfg.MODEL.CLIP_MODEL = CN()\n cfg.MODEL.CLIP_MODEL.NAME = 'ViT-L/14@336px'\n cfg.MODEL.CLIP_MODEL.INPUT_RESOLUTION = 336\n cfg.MODEL.CLIP_MODEL.PATCH_SIZE = 14\n cfg.MODEL.CLIP_MODEL.WIDTH = 1024\n cfg.MODEL.CLIP_MODEL.LAYERS = 24\n cfg.MODEL.CLIP_MODEL.HEADS = 16\n cfg.MODEL.CLIP_MODEL.OUTPUT_DIM = 768\n\n cfg.MODEL.CLIP_MODEL.TEMPERATURE = 0.01" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "maskclip/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "maskclip/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n # # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n 
utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop_CategoryAreaConstraint(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n # cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n 
dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "maskclip/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "maskclip/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from maskclip import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
    InstanceSegEvaluator,
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
    SemanticSegmentorWithTTA,
    add_maskformer2_config,
)
import warnings
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm
12,665
            mapper = None
            return build_detection_train_loader(cfg, mapper=mapper)

    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer):
        """
        It now calls :func:`detectron2.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        return build_lr_scheduler(cfg, optimizer)

    @classmethod
    def build_optimizer(cls, cfg, model):
        weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM
        weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED

        defaults = {}
        defaults["lr"] = cfg.SOLVER.BASE_LR
        defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY

        norm_module_types = (
            torch.nn.BatchNorm1d,
            torch.nn.BatchNorm2d,
            torch.nn.BatchNorm3d,
            torch.nn.SyncBatchNorm,
            # NaiveSyncBatchNorm inherits from BatchNorm2d
            torch.nn.GroupNorm,
            torch.nn.InstanceNorm1d,
            torch.nn.InstanceNorm2d,
            torch.nn.InstanceNorm3d,
            torch.nn.LayerNorm,
            torch.nn.LocalResponseNorm,
        )

        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for module_name, module in model.named_modules():
            for module_param_name, value in module.named_parameters(recurse=False):
                if not value.requires_grad:
                    continue
                if not 'added_params' in module_name:
                    continue
                # Avoid duplicating parameters
                if value in memo:
                    continue
                memo.add(value)

                hyperparams = copy.copy(defaults)
                if "backbone" in module_name:
                    hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
                if (
                    "relative_position_bias_table" in module_param_name
                    or "absolute_pos_embed" in module_param_name
                ):
                    print(module_param_name)
                    hyperparams["weight_decay"] = 0.0
                if isinstance(module, norm_module_types):
                    hyperparams["weight_decay"] = weight_decay_norm
                if isinstance(module, torch.nn.Embedding):
                    hyperparams["weight_decay"] = weight_decay_embed
                params.append({"params": [value], **hyperparams})

        def maybe_add_full_model_gradient_clipping(optim):
            # detectron2 doesn't have full model gradient clipping now
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (
                cfg.SOLVER.CLIP_GRADIENTS.ENABLED
                and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
                and clip_norm_val > 0.0
            )

            class FullModelGradientClippingOptimizer(optim):
                def step(self, closure=None):
                    all_params = itertools.chain(*[x["params"] for x in self.param_groups])
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)

            return FullModelGradientClippingOptimizer if enable else optim

        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
                params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
            )
        elif optimizer_type == "ADAMW":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
                params, cfg.SOLVER.BASE_LR
            )
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA.
        logger.info("Running inference with test-time augmentation ...")
        model = SemanticSegmentorWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # for poly lr schedule
    add_deeplab_config(cfg)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.

This script is a simplified version of the training script in detectron2/tools.
"""
try:
    # ignore ShapelyDeprecationWarning from fvcore
    warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
    pass


class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to MaskFormer.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        # semantic segmentation
        if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                )
            )
        # instance segmentation
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        # panoptic segmentation
        if evaluator_type in [
            "coco_panoptic_seg",
            "ade20k_panoptic_seg",
            "cityscapes_panoptic_seg",
            "mapillary_vistas_panoptic_seg",
        ]:
            if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
                evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        # COCO
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Mapillary Vistas
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Cityscapes
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "cityscapes_panoptic_seg":
            if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
            if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
        # ADE20K
        if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        # LVIS
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, output_dir=output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        # Semantic segmentation dataset mapper
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Panoptic segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
            mapper = MaskFormerPanopticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # coco instance segmentation lsj new baseline
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj":
            mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # coco panoptic segmentation lsj new baseline
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
            mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        else:
            mapper = None
            return build_detection_train_loader(cfg, mapper=mapper)

    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer):
        """
        It now calls :func:`detectron2.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        return build_lr_scheduler(cfg, optimizer)

    @classmethod
    def build_optimizer(cls, cfg, model):
        weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM
        weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED

        defaults = {}
        defaults["lr"] = cfg.SOLVER.BASE_LR
        defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY

        norm_module_types = (
            torch.nn.BatchNorm1d,
            torch.nn.BatchNorm2d,
            torch.nn.BatchNorm3d,
            torch.nn.SyncBatchNorm,
            # NaiveSyncBatchNorm inherits from BatchNorm2d
            torch.nn.GroupNorm,
            torch.nn.InstanceNorm1d,
            torch.nn.InstanceNorm2d,
            torch.nn.InstanceNorm3d,
            torch.nn.LayerNorm,
            torch.nn.LocalResponseNorm,
        )

        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for module_name, module in model.named_modules():
            for module_param_name, value in module.named_parameters(recurse=False):
                if not value.requires_grad:
                    continue
                if not 'added_params' in module_name:
                    continue
                # Avoid duplicating parameters
                if value in memo:
                    continue
                memo.add(value)

                hyperparams = copy.copy(defaults)
                if "backbone" in module_name:
                    hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
                if (
                    "relative_position_bias_table" in module_param_name
                    or "absolute_pos_embed" in module_param_name
                ):
                    print(module_param_name)
                    hyperparams["weight_decay"] = 0.0
                if isinstance(module, norm_module_types):
                    hyperparams["weight_decay"] = weight_decay_norm
                if isinstance(module, torch.nn.Embedding):
                    hyperparams["weight_decay"] = weight_decay_embed
                params.append({"params": [value], **hyperparams})

        def maybe_add_full_model_gradient_clipping(optim):
            # detectron2 doesn't have full model gradient clipping now
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (
                cfg.SOLVER.CLIP_GRADIENTS.ENABLED
                and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
                and clip_norm_val > 0.0
            )

            class FullModelGradientClippingOptimizer(optim):
                def step(self, closure=None):
                    all_params = itertools.chain(*[x["params"] for x in self.param_groups])
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)

            return FullModelGradientClippingOptimizer if enable else optim

        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
                params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
            )
        elif optimizer_type == "ADAMW":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
                params, cfg.SOLVER.BASE_LR
            )
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA.
        logger.info("Running inference with test-time augmentation ...")
        model = SemanticSegmentorWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # for poly lr schedule
    add_deeplab_config(cfg)
add_maskformer2_config(cfg)
0
2023-10-13 02:32:25+00:00
16k
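Each row also carries a `token_num` count and a `level` bucket (for example `16k`). Below is a small sketch of how rows like these might be loaded from a JSON-lines file and grouped by level under a context budget; the file layout and the 16k budget are assumptions for illustration, not part of the dataset.

```python
# Hypothetical loader: group samples by `level`, skipping rows whose
# `token_num` exceeds an assumed context budget.
import json
from collections import defaultdict


def load_samples_by_level(path: str, max_tokens: int = 16_000):
    by_level = defaultdict(list)
    with open(path, encoding="utf-8") as f:
        for line in f:
            sample = json.loads(line)
            if sample["token_num"] <= max_tokens:
                by_level[sample["level"]].append(sample)
    return by_level
```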
mlpc-ucsd/MasQCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "masqclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "add_masqclip_config", "path": "masqclip/config.py", "snippet": "def add_masqclip_config(cfg):\n \"\"\"\n Add config for MasQCLIP.\n \"\"\"\n cfg.MODEL.MASQ_CLIP = CN()\n cfg.MODEL.MASQ_CLIP.MODEL_NAME = [\"ViT-L/14@336px\"]\n \n cfg.MODEL.MASQ_CLIP.SCORE_THRESHOLD = 0.8\n cfg.MODEL.MASQ_CLIP.NMS_THRESHOLD = 0.1" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "masqclip/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "masqclip/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # change_code_note\n\n # # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], 
format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = 
utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "masqclip/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "masqclip/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
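Every mapper snippet collected in the context list above follows the same Detectron2 convention: a constructor decorated with `@configurable` plus a `from_config` classmethod that turns config values into constructor keyword arguments, which is what lets the training code further down instantiate a mapper simply as `MaskFormerSemanticDatasetMapper(cfg, True)`. The sketch below mimics that shape in plain Python so it runs without detectron2 installed; `SimpleCfg`, its fields, and `ToyDatasetMapper` are illustrative stand-ins, not part of the repository.

```python
# Minimal sketch of the from_config pattern used by the dataset mappers above.
# SimpleCfg is a stand-in for detectron2's CfgNode, used only so this runs
# without detectron2; its field names are assumptions for illustration.
from dataclasses import dataclass, field
from typing import List


@dataclass
class SimpleCfg:
    image_format: str = "RGB"
    augmentations: List[str] = field(default_factory=lambda: ["Resize(1024, 1024)"])


class ToyDatasetMapper:
    def __init__(self, is_train=True, *, augmentations, image_format):
        self.is_train = is_train
        self.tfm_gens = augmentations
        self.img_format = image_format

    @classmethod
    def from_config(cls, cfg: SimpleCfg, is_train: bool = True) -> dict:
        # Mirrors the mappers above: translate config fields into __init__ kwargs.
        return {
            "is_train": is_train,
            "augmentations": cfg.augmentations,
            "image_format": cfg.image_format,
        }


if __name__ == "__main__":
    cfg = SimpleCfg()
    mapper = ToyDatasetMapper(**ToyDatasetMapper.from_config(cfg, is_train=True))
    print(mapper.img_format, mapper.tfm_gens)
```

In the real classes, detectron2's `configurable` decorator performs this unpacking automatically whenever the first positional argument is a config object, so callers can write `MapperClass(cfg, is_train)` directly.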
import copy import itertools import logging import os import torch import detectron2.utils.comm as comm import warnings from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from masqclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, add_masqclip_config, )
11257
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
4
2023-10-13 02:43:53+00:00
16k
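That completes one record of the dump: retrieved in-repo context snippets, the file's import block, the cropped prefix of the file, the gold next line, and bookkeeping fields (token count, gold snippet index, timestamp, context-length bucket). As a rough illustration of how such a row could be assembled into a next-line completion prompt and scored, here is a small sketch; the toy `row` contents, the snippet-joining layout, and the exact-match scoring are assumptions made for illustration, not part of the dataset itself.

```python
# Minimal sketch: assembling one row of this dump into a next-line prompt.
# Field names mirror the columns shown in this dump; the toy row values and
# the prompt layout below are illustrative assumptions only.

def build_prompt(row: dict, max_context_snippets: int = 4) -> str:
    """Join retrieved in-repo snippets, the import block, and the cropped code."""
    parts = []
    for entry in row["context"][:max_context_snippets]:
        # Each context entry carries an identifier, its source path, and a snippet.
        parts.append(f"# From {entry['path']} ({entry['identifier']})\n{entry['snippet']}")
    parts.append(row["import_statement"])
    parts.append(row["cropped_code"])
    return "\n\n".join(parts)


def exact_match(prediction: str, row: dict) -> bool:
    """Score a predicted continuation against the gold `next_line`."""
    return prediction.strip() == row["next_line"].strip()


if __name__ == "__main__":
    # Toy row using the same schema as the records above (values abridged).
    row = {
        "repo_name": "ielab/llm-rankers",
        "file_path": "run.py",
        "context": [{"identifier": "SearchResult",
                     "path": "rankers/rankers.py",
                     "snippet": "class SearchResult:\n    docid: str\n    score: float\n    text: str"}],
        "import_statement": "from rankers.rankers import SearchResult",
        "cropped_code": "def main():",
        "next_line": "    results = []",
        "gold_snippet_index": 0,  # index of the context entry named by the column above
    }
    print(build_prompt(row))
    print(exact_match("results = []", row))  # True once leading whitespace is stripped
```

The next record, for ielab/llm-rankers, follows immediately below with the same layout.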
ielab/llm-rankers
run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, method=\"qlm\", batch_size=1, cache_dir=None):\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.device = device\n self.method = method\n self.batch_size = batch_size\n\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n if self.method == \"qlm\":\n prompt = \"Passage: {text}\\nPlease write a question based on this passage.\"\n data = [prompt.format(text=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n labels = self.tokenizer.encode(f\"<pad> {query}\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_labels = labels if labels.shape[0] == len(batch_inputs['input_ids']) \\\n else labels[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n self.total_prompt_tokens += batch_labels.shape[0] * batch_labels.shape[\n 1] # we count decoder inputs as part of prompt.\n\n batch_inputs = batch_inputs.to(self.llm.device)\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n labels=batch_labels).logits\n\n loss_fct = torch.nn.CrossEntropyLoss(reduction=\"none\")\n scores = loss_fct(logits.view(-1, logits.size(-1)), batch_labels.view(-1))\n scores = -1 * scores.view(-1, batch_labels.size(-1)).sum(dim=1) # neg log prob\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n elif self.method == \"yes_no\":\n prompt = \"Passage: {text}\\nQuery: {query}\\nDoes the passage answer the query? 
Answer 'Yes' or 'No'\"\n yes_id = self.tokenizer.encode(\"Yes\", add_special_tokens=False)[0]\n no_id = self.tokenizer.encode(\"No\", add_special_tokens=False)[0]\n data = [prompt.format(text=doc.text, query=query) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.tokenizer.pad_token_id]).to(self.llm.device, dtype=torch.long).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n yes_scores = logits[:, :, yes_id]\n no_scores = logits[:, :, no_id]\n batch_scores = torch.cat((yes_scores, no_scores), dim=1)\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 0]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "MonoT5LlmRanker", "path": "rankers/pointwise.py", "snippet": "class MonoT5LlmRanker(PointwiseLlmRanker):\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n prompt = \"Query: {query} Document: {document} Relevant:\"\n data = [prompt.format(query=query, document=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.llm.config.decoder_start_token_id]).to(self.llm.device, dtype=torch.long).repeat(\n self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(\n batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n\n # 6136 and 1176 are the indexes of the tokens false and true in 
T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 1]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking" }, { "identifier": "SetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class SetwiseLlmRanker(LlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self,\n model_name_or_path,\n tokenizer_name_or_path,\n device,\n num_child=3,\n k=10,\n scoring='generation',\n method=\"heapsort\",\n num_permutation=1,\n cache_dir=None):\n\n self.device = device\n self.num_child = num_child\n self.num_permutation = num_permutation\n self.k = k\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n\n test = []\n for i in range(len(self.CHARACTERS)):\n test.append(f'<pad> Passage {self.CHARACTERS[i]}')\n\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n\n self.scoring = scoring\n self.method = method\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1 if self.num_permutation == 1 else self.num_permutation\n\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n\n if self.num_permutation == 1:\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n output = output[-1]\n else:\n id_passage = [(i, p) for i, p in enumerate(docs)]\n labels = [self.CHARACTERS[i] for i in range(len(docs))]\n batch_data = []\n for _ in range(self.num_permutation):\n batch_data.append([random.sample(id_passage, len(id_passage)),\n random.sample(labels, len(labels))])\n\n batch_ref = []\n input_text = []\n for batch in batch_data:\n ref = []\n passages = []\n characters = []\n for p, c in zip(batch[0], batch[1]):\n ref.append(p[0])\n passages.append(p[1].text)\n characters.append(c)\n batch_ref.append((ref, characters))\n passages = \"\\n\\n\".join([f'Passage {characters[i]}: \"{passages[i]}\"' for i in range(len(passages))])\n input_text.append(f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:')\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1] * input_ids.shape[0]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids.repeat(input_ids.shape[0], 1),\n max_new_tokens=2)\n output = self.tokenizer.batch_decode(output_ids[:, self.decoder_input_ids.shape[1]:],\n skip_special_tokens=True)\n\n # vote\n candidates = []\n for ref, result in zip(batch_ref, output):\n result = result.strip().upper()\n docids, characters = ref\n if len(result) != 1 or result not in characters:\n print(f\"Unexpected output: {result}\")\n continue\n win_doc = docids[characters.index(result)]\n candidates.append(win_doc)\n\n if len(candidates) == 0:\n print(f\"Unexpected voting: {output}\")\n output = \"Unexpected voting.\"\n else:\n # handle tie\n 
candidate_counts = Counter(candidates)\n max_count = max(candidate_counts.values())\n most_common_candidates = [candidate for candidate, count in candidate_counts.items() if\n count == max_count]\n if len(most_common_candidates) == 1:\n output = self.CHARACTERS[most_common_candidates[0]]\n else:\n output = self.CHARACTERS[random.choice(most_common_candidates)]\n\n elif self.config.model_type == 'llama':\n conversation = [{\"role\": \"user\", \"content\": input_text}]\n\n prompt = self.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)\n prompt += \" Passage:\"\n\n input_ids = self.tokenizer(prompt, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n else:\n raise NotImplementedError\n\n elif self.scoring == 'likelihood':\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip(self.CHARACTERS[:len(docs)], scores), key=lambda x: x[1], reverse=True)\n output = ranked[0][0]\n\n else:\n raise NotImplementedError\n\n if len(output) == 1 and output in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n\n return output\n\n def heapify(self, arr, n, i, query):\n # Find largest among root and children\n if self.num_child * i + 1 < n: # if there are children\n docs = [arr[i]] + arr[self.num_child * i + 1: min((self.num_child * (i + 1) + 1), n)]\n inds = [i] + list(range(self.num_child * i + 1, min((self.num_child * (i + 1) + 1), n)))\n output = self.compare(query, docs)\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n try:\n largest = inds[best_ind]\n except IndexError:\n largest = i\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest, query)\n\n def heapSort(self, arr, query, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // self.num_child, -1, -1):\n self.heapify(arr, n, i, query)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0, query)\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n self.heapSort(ranking, query, self.k)\n ranking = list(reversed(ranking))\n\n # elif self.method == \"bubblesort\":\n # for i in range(k):\n # start_ind = len(ranking) - (self.num_child + 1)\n # end_ind = len(ranking)\n # while True:\n # if start_ind < i:\n # start_ind = i\n # output = self.compare(query, ranking[start_ind:end_ind])\n # try:\n # best_ind = self.CHARACTERS.index(output)\n # except ValueError:\n # best_ind = 0\n # if best_ind != 0:\n # ranking[start_ind], 
ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n #\n # if start_ind == i:\n # break\n #\n # start_ind -= self.num_child\n # end_ind -= self.num_child\n elif self.method == \"bubblesort\":\n last_start = len(ranking) - (self.num_child + 1)\n\n for i in range(self.k):\n start_ind = last_start\n end_ind = last_start + (self.num_child + 1)\n is_change = False\n while True:\n if start_ind < i:\n start_ind = i\n output = self.compare(query, ranking[start_ind:end_ind])\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n if best_ind != 0:\n ranking[start_ind], ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n if not is_change:\n is_change = True\n if last_start != len(ranking) - (self.num_child + 1) \\\n and best_ind == len(ranking[start_ind:end_ind])-1:\n last_start += len(ranking[start_ind:end_ind])-1\n\n if start_ind == i:\n break\n\n if not is_change:\n last_start -= self.num_child\n\n start_ind -= self.num_child\n end_ind -= self.num_child\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "OpenAiSetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class OpenAiSetwiseLlmRanker(SetwiseLlmRanker):\n def __init__(self, model_name_or_path, api_key, num_child=3, method='heapsort', k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.num_child = num_child\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pool of passages based on their relevance to the query.\"\n openai.api_key = api_key\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage.'\n\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-Z])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. 
retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "PairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class PairwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path,\n tokenizer_name_or_path,\n device,\n method=\"allpair\",\n batch_size=2,\n k=10,\n cache_dir=None\n ):\n self.device = device\n self.method = method\n self.batch_size = batch_size\n self.k = k\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n\nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device)\n self.decoder_input_ids = self.decoder_input_ids.repeat(self.batch_size, 1)\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.tokenizer.pad_token = \"[PAD]\"\n self.tokenizer.padding_side = \"left\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_texts,\n padding='longest',\n return_tensors=\"pt\").input_ids.to(self.llm.device)\n\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n elif self.config.model_type == 'llama':\n conversation0 = [{\"role\": \"user\", \"content\": input_texts[0]}]\n conversation1 = [{\"role\": \"user\", \"content\": input_texts[1]}]\n\n prompt0 = self.tokenizer.apply_chat_template(conversation0, tokenize=False, add_generation_prompt=True)\n prompt0 += \" Passage:\"\n prompt1 = self.tokenizer.apply_chat_template(conversation1, tokenize=False, add_generation_prompt=True)\n prompt1 += \" Passage:\"\n\n input_ids = self.tokenizer([prompt0, prompt1], return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output0 = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n output1 = self.tokenizer.decode(output_ids[1][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n return [f'Passage {output0}', f'Passage {output1}']\n else:\n raise NotImplementedError\n\n return output\n\n def heapify(self, arr, n, i):\n # Find largest among root and children\n largest = i\n l = 2 * i + 1\n r = 2 * i + 2\n if l < n and arr[l] > arr[i]:\n largest = l\n\n if r < n and arr[r] > arr[largest]:\n largest = r\n\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest)\n\n def heapSort(self, arr, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // 2, -1, -1):\n self.heapify(arr, n, i)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0)\n\n def rerank(self, query: str, ranking: 
List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"allpair\":\n doc_pairs = list(combinations(ranking, 2))\n allpairs = []\n for doc1, doc2 in tqdm(doc_pairs):\n allpairs.append(self.prompt.format(query=query, doc1=doc1.text, doc2=doc2.text))\n allpairs.append(self.prompt.format(query=query, doc1=doc2.text, doc2=doc1.text))\n\n allpairs_dataset = Text2TextGenerationDataset(allpairs, self.tokenizer)\n\n loader = DataLoader(\n allpairs_dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n outputs = []\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_outputs = self.llm.generate(batch_inputs['input_ids'].to(self.llm.device),\n decoder_input_ids=self.decoder_input_ids\n if self.decoder_input_ids.shape[0] == len(batch_inputs['input_ids'])\n else self.decoder_input_ids[:len(batch_inputs['input_ids']), :], # last batch might be smaller\n max_new_tokens=2)\n self.total_completion_tokens += batch_outputs.shape[0] * batch_outputs.shape[1]\n outputs.extend(batch_outputs.cpu().numpy())\n\n outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)\n scores = defaultdict(float)\n for i in range(0, len(outputs), 2):\n doc1, doc2 = doc_pairs[i//2]\n output1 = outputs[i]\n output2 = outputs[i + 1]\n if output1 == \"Passage A\" and output2 == \"Passage B\":\n scores[doc1.docid] += 1\n elif output1 == \"Passage B\" and output2 == \"Passage A\":\n scores[doc2.docid] += 1\n else: # conflict\n scores[doc1.docid] += 0.5\n scores[doc2.docid] += 0.5\n\n ranking = sorted([SearchResult(docid=docid, score=score, text=None) for docid, score in scores.items()],\n key=lambda x: x.score, reverse=True)\n\n elif self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n out = self.ranker.compare(query, [self.text, other.text])\n if out[0] == \"Passage A\" and out[1] == \"Passage B\":\n return True\n else:\n return False\n\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n #\n # elif self.method == \"bubblesort\":\n # k = min(k, len(ranking))\n # for i in range(k):\n # current_ind = len(ranking) - 1\n # while True:\n # if current_ind == i:\n # break\n # doc1 = ranking[current_ind]\n # doc2 = ranking[current_ind - 1]\n # output = self.compare(query, [doc1.text, doc2.text])\n # if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n # ranking[current_ind - 1], ranking[current_ind] = ranking[current_ind], ranking[current_ind - 1]\n # current_ind -= 1\n elif self.method == \"bubblesort\":\n k = min(self.k, len(ranking))\n\n last_end = len(ranking) - 1\n for i in range(k):\n current_ind = last_end\n is_change = False\n while True:\n if current_ind <= i:\n break\n doc1 = ranking[current_ind]\n doc2 = ranking[current_ind - 1]\n output = self.compare(query, [doc1.text, doc2.text])\n if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n ranking[current_ind - 1], ranking[current_ind] = 
ranking[current_ind], ranking[current_ind - 1]\n\n if not is_change:\n is_change = True\n if last_end != len(ranking) - 1: # skip unchanged pairs at the bottom\n last_end += 1\n if not is_change:\n last_end -= 1\n current_ind -= 1\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "DuoT5LlmRanker", "path": "rankers/pairwise.py", "snippet": "class DuoT5LlmRanker(PairwiseLlmRanker):\n def compare(self, query: str, docs: List[str]) -> bool:\n self.total_compare += 1\n self.prompt = 'Query: {query} Document0: {doc1} Document1: {doc2} Relevant:'\n\n inputs = [self.prompt.format(query=query, doc1=docs[0], doc2=docs[1]),\n self.prompt.format(query=query, doc1=docs[1], doc2=docs[0])]\n inputs = self.tokenizer(inputs, padding=True, truncation=True, return_tensors=\"pt\").to(self.llm.device)\n decode_ids = torch.full((2, 1),\n self.llm.config.decoder_start_token_id,\n dtype=torch.long, device=self.llm.device)\n\n self.total_prompt_tokens += inputs['input_ids'].shape[0] * inputs['input_ids'].shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n decoder_input_ids=decode_ids).logits\n # 6136 and 1176 are the indexes of the tokens false and true in T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n batch_probs = batch_scores[:, 1]\n return batch_probs[0] > batch_probs[1]\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n return self.ranker.compare(query, [self.text, other.text])\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results" }, { "identifier": "OpenAiPairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class OpenAiPairwiseLlmRanker(PairwiseLlmRanker):\n def __init__(self,\n model_name_or_path,\n api_key,\n method=\"heapsort\",\n batch_size=2,\n k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 
0\n self.total_completion_tokens = 0\n self.CHARACTERS = [\"A\", \"B\"]\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pair of passages based on their relevance to the query.\"\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n \nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n openai.api_key = api_key\n\n def _get_response(self, input_text):\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-B])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n\n return [f'Passage {self._get_response(input_texts[0])}', f'Passage {self._get_response(input_texts[1])}']\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "OpenAiListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class OpenAiListwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path, api_key, window_size, step_size, num_repeat):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n openai.api_key = api_key\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n messages = create_permutation_instruction_chat(query, docs, 
self.llm)\n while True:\n try:\n completion = openai.ChatCompletion.create(\n model=self.llm,\n messages=messages,\n temperature=0.0,\n request_timeout=15)\n self.total_completion_tokens += int(completion['usage']['completion_tokens'])\n self.total_prompt_tokens += int(completion['usage']['prompt_tokens'])\n return completion['choices'][0]['message']['content']\n except Exception as e:\n print(str(e))\n if \"This model's maximum context length is\" in str(e):\n print('reduce_length')\n return 'ERROR::reduce_length'\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n for _ in range(self.num_repeat):\n ranking = copy.deepcopy(ranking)\n end_pos = len(ranking)\n start_pos = end_pos - self.window_size\n while start_pos >= 0:\n start_pos = max(start_pos, 0)\n result = self.compare(query, ranking[start_pos: end_pos])\n ranking = receive_permutation(ranking, result, start_pos, end_pos)\n end_pos = end_pos - self.step_size\n start_pos = start_pos - self.step_size\n\n for i, doc in enumerate(ranking):\n doc.score = -i\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "ListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class ListwiseLlmRanker(OpenAiListwiseLlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\",\n \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, window_size, step_size,\n scoring='generation', num_repeat=1, cache_dir=None):\n\n self.scoring = scoring\n self.device = device\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n input_text = create_permutation_instruction_complete(query, docs)\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\", truncation=True).input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n elif self.config.model_type == 'llama':\n input_text = create_permutation_instruction_chat(query, docs, model_name=None)\n input_ids = self.tokenizer.apply_chat_template(input_text, return_tensors=\"pt\",\n add_generation_prompt=True).to(self.device)\n\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip()\n\n elif self.scoring == 'likelihood':\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip([f\"[{str(i+1)}]\" for i in range(len(docs))], scores), key=lambda x: x[1], reverse=True)\n output = '>'.join(ranked[i][0] for i in range(len(ranked)))\n\n return output\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" } ]
import logging import ir_datasets import argparse import sys import json import time import random from pyserini.search.lucene import LuceneSearcher from pyserini.search._base import get_topics from rankers.rankers import SearchResult from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker from tqdm import tqdm
13,913
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key:
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key:
ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
8
2023-10-14 01:39:38+00:00
16k
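The pairwise and setwise reranker snippets in the record above avoid sorting the whole candidate list: they build a max-heap over documents whose `__gt__` delegates to an LLM comparison (`ranker.compare`) and pop only the top `k` elements (`heapSort(arr, self.k)`), so the number of expensive comparisons stays near O(n + k log n). The sketch below is a standalone illustration of that heap-based top-k pattern, not the repository's code; the `gt` comparator is a placeholder for the LLM pairwise call and the integer "documents" are toy data.

from typing import Callable, List, TypeVar

T = TypeVar("T")

def _heapify(arr: List[T], n: int, i: int, gt: Callable[[T, T], bool]) -> None:
    # Sift element i down a max-heap of size n; gt(a, b) means "a ranks above b".
    largest = i
    left, right = 2 * i + 1, 2 * i + 2
    if left < n and gt(arr[left], arr[largest]):
        largest = left
    if right < n and gt(arr[right], arr[largest]):
        largest = right
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        _heapify(arr, n, largest, gt)

def heap_top_k(arr: List[T], k: int, gt: Callable[[T, T], bool]) -> List[T]:
    # Build a max-heap once, then pop only k elements; each pop costs O(log n) comparisons.
    n = len(arr)
    for i in range(n // 2 - 1, -1, -1):
        _heapify(arr, n, i, gt)
    top = []
    for end in range(n - 1, max(n - 1 - k, -1), -1):
        arr[0], arr[end] = arr[end], arr[0]   # move current best to the tail
        top.append(arr[end])
        _heapify(arr, end, 0, gt)             # restore the heap over the remaining prefix
    return top

if __name__ == "__main__":
    # Toy usage: an integer score stands in for an LLM relevance judgement.
    docs = [3, 9, 1, 7, 5, 8, 2]
    print(heap_top_k(docs, k=3, gt=lambda a, b: a > b))  # -> [9, 8, 7]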
amazon-science/tabsyn
baselines/tabddpm/train.py
[ { "identifier": "make_dataset", "path": "utils_train.py", "snippet": "def make_dataset(\n data_path: str,\n T: src.Transformations,\n task_type,\n change_val: bool,\n concat = True,\n):\n\n # classification\n if task_type == 'binclass' or task_type == 'multiclass':\n X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None\n X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None\n y = {} if os.path.exists(os.path.join(data_path, 'y_train.npy')) else None\n\n for split in ['train', 'test']:\n X_num_t, X_cat_t, y_t = src.read_pure_data(data_path, split)\n if X_num is not None:\n X_num[split] = X_num_t\n if X_cat is not None:\n if concat:\n X_cat_t = concat_y_to_X(X_cat_t, y_t)\n X_cat[split] = X_cat_t \n if y is not None:\n y[split] = y_t\n else:\n # regression\n X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None\n X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None\n y = {} if os.path.exists(os.path.join(data_path, 'y_train.npy')) else None\n\n for split in ['train', 'test']:\n X_num_t, X_cat_t, y_t = src.read_pure_data(data_path, split)\n\n if X_num is not None:\n if concat:\n X_num_t = concat_y_to_X(X_num_t, y_t)\n X_num[split] = X_num_t\n if X_cat is not None:\n X_cat[split] = X_cat_t\n if y is not None:\n y[split] = y_t\n\n info = src.load_json(os.path.join(data_path, 'info.json'))\n\n D = src.Dataset(\n X_num,\n X_cat,\n y,\n y_info={},\n task_type=src.TaskType(info['task_type']),\n n_classes=info.get('n_classes')\n )\n\n if change_val:\n D = src.change_val(D)\n\n # def categorical_to_idx(feature):\n # unique_categories = np.unique(feature)\n # idx_mapping = {category: index for index, category in enumerate(unique_categories)}\n # idx_feature = np.array([idx_mapping[category] for category in feature])\n # return idx_feature\n\n # for split in ['train', 'val', 'test']:\n # D.y[split] = categorical_to_idx(D.y[split].squeeze(1))\n\n return src.transform_dataset(D, T, None)" }, { "identifier": "update_ema", "path": "utils_train.py", "snippet": "def update_ema(target_params, source_params, rate=0.999):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for target, source in zip(target_params, source_params):\n target.detach().mul_(rate).add_(source.detach(), alpha=1 - rate)" }, { "identifier": "MLPDiffusion", "path": "baselines/tabddpm/models/modules.py", "snippet": "class MLPDiffusion(nn.Module):\n def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 1024):\n super().__init__()\n self.dim_t = dim_t\n self.num_classes = num_classes\n self.is_y_cond = is_y_cond\n\n # d0 = rtdl_params['d_layers'][0]\n\n rtdl_params['d_in'] = dim_t\n rtdl_params['d_out'] = d_in\n\n self.mlp = MLP.make_baseline(**rtdl_params)\n\n if self.num_classes > 0 and is_y_cond:\n self.label_emb = nn.Embedding(self.num_classes, dim_t)\n elif self.num_classes == 0 and is_y_cond:\n self.label_emb = nn.Linear(1, dim_t)\n \n self.proj = nn.Linear(d_in, dim_t)\n self.time_embed = nn.Sequential(\n nn.Linear(dim_t, dim_t),\n nn.SiLU(),\n nn.Linear(dim_t, dim_t)\n )\n \n def forward(self, x, timesteps, y=None):\n emb = self.time_embed(timestep_embedding(timesteps, self.dim_t))\n if self.is_y_cond and y is not None:\n if self.num_classes > 0:\n y = 
y.squeeze()\n else:\n y = y.resize(y.size(0), 1).float()\n emb += F.silu(self.label_emb(y))\n x = self.proj(x) + emb\n\n return self.mlp(x)" }, { "identifier": "GaussianMultinomialDiffusion", "path": "baselines/tabddpm/models/gaussian_multinomial_distribution.py", "snippet": "class GaussianMultinomialDiffusion(torch.nn.Module):\n def __init__(\n self,\n num_classes: np.array,\n num_numerical_features: int,\n denoise_fn,\n num_timesteps=1000,\n gaussian_loss_type='mse',\n gaussian_parametrization='eps',\n multinomial_loss_type='vb_stochastic',\n parametrization='x0',\n scheduler='cosine',\n device=torch.device('cpu')\n ):\n\n super(GaussianMultinomialDiffusion, self).__init__()\n assert multinomial_loss_type in ('vb_stochastic', 'vb_all')\n assert parametrization in ('x0', 'direct')\n\n if multinomial_loss_type == 'vb_all':\n print('Computing the loss using the bound on _all_ timesteps.'\n ' This is expensive both in terms of memory and computation.')\n\n self.num_numerical_features = num_numerical_features\n self.num_classes = num_classes # it as a vector [K1, K2, ..., Km]\n self.num_classes_expanded = torch.from_numpy(\n np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))])\n ).to(device)\n\n self.slices_for_classes = [np.arange(self.num_classes[0])]\n offsets = np.cumsum(self.num_classes)\n for i in range(1, len(offsets)):\n self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i]))\n self.offsets = torch.from_numpy(np.append([0], offsets)).to(device)\n\n self._denoise_fn = denoise_fn\n self.gaussian_loss_type = gaussian_loss_type\n self.gaussian_parametrization = gaussian_parametrization\n self.multinomial_loss_type = multinomial_loss_type\n self.num_timesteps = num_timesteps\n self.parametrization = parametrization\n self.scheduler = scheduler\n\n alphas = 1. - get_named_beta_schedule(scheduler, num_timesteps)\n alphas = torch.tensor(alphas.astype('float64')) # alpha2_t\n betas = 1. 
- alphas # beta2_t\n\n log_alpha = np.log(alphas)\n log_cumprod_alpha = np.cumsum(log_alpha)\n\n log_1_min_alpha = log_1_min_a(log_alpha)\n log_1_min_cumprod_alpha = log_1_min_a(log_cumprod_alpha)\n\n alphas_cumprod = np.cumprod(alphas, axis=0) # tilde_alpha2_t\n alphas_cumprod_prev = torch.tensor(np.append(1.0, alphas_cumprod[:-1])) # tilde_alpha2_{t-1}\n alphas_cumprod_next = torch.tensor(np.append(alphas_cumprod[1:], 0.0)) # tilde_alpha2_{t+1}\n sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) # tilde_alpha_t\n sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod) # tilde_beta_t\n sqrt_recip_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod) # sqrt(1 / tilde_alpha_t)\n sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod - 1) # sqrt(tilde_beta_t / tilde_alpha_t )\n\n # Gaussian diffusion\n\n self.posterior_variance = (\n betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)\n )\n self.posterior_log_variance_clipped = torch.from_numpy(\n np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))\n ).float().to(device)\n self.posterior_mean_coef1 = (\n betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)\n ).float().to(device)\n self.posterior_mean_coef2 = (\n (1.0 - alphas_cumprod_prev)\n * np.sqrt(alphas.numpy())\n / (1.0 - alphas_cumprod)\n ).float().to(device)\n\n assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.e-5\n assert log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() < 1e-5\n assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.e-5\n\n # Convert to float32 and register buffers.\n self.register_buffer('alphas', alphas.float().to(device))\n self.register_buffer('log_alpha', log_alpha.float().to(device))\n self.register_buffer('log_1_min_alpha', log_1_min_alpha.float().to(device))\n self.register_buffer('log_1_min_cumprod_alpha', log_1_min_cumprod_alpha.float().to(device))\n self.register_buffer('log_cumprod_alpha', log_cumprod_alpha.float().to(device))\n self.register_buffer('alphas_cumprod', alphas_cumprod.float().to(device))\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.float().to(device))\n self.register_buffer('alphas_cumprod_next', alphas_cumprod_next.float().to(device))\n self.register_buffer('sqrt_alphas_cumprod', sqrt_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', sqrt_one_minus_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_recip_alphas_cumprod', sqrt_recip_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', sqrt_recipm1_alphas_cumprod.float().to(device))\n\n self.register_buffer('Lt_history', torch.zeros(num_timesteps))\n self.register_buffer('Lt_count', torch.zeros(num_timesteps))\n \n # Gaussian part\n def gaussian_q_mean_variance(self, x_start, t):\n mean = (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n )\n variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = extract(\n self.log_1_min_cumprod_alpha, t, x_start.shape\n )\n return mean, variance, log_variance\n \n def gaussian_q_sample(self, x_start, t, noise=None):\n if noise is None:\n noise = torch.randn_like(x_start)\n assert noise.shape == x_start.shape\n return (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def gaussian_q_posterior_mean_variance(self, x_start, x_t, t):\n assert x_start.shape == x_t.shape\n posterior_mean = (\n 
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start\n + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract(\n self.posterior_log_variance_clipped, t, x_t.shape\n )\n assert (\n posterior_mean.shape[0]\n == posterior_variance.shape[0]\n == posterior_log_variance_clipped.shape[0]\n == x_start.shape[0]\n )\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def gaussian_p_mean_variance(\n self, model_output, x, t, clip_denoised=False, denoised_fn=None, model_kwargs=None\n ):\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n\n model_variance = torch.cat([self.posterior_variance[1].unsqueeze(0).to(x.device), (1. - self.alphas)[1:]], dim=0)\n # model_variance = self.posterior_variance.to(x.device)\n model_log_variance = torch.log(model_variance)\n\n model_variance = extract(model_variance, t, x.shape)\n model_log_variance = extract(model_log_variance, t, x.shape)\n\n\n if self.gaussian_parametrization == 'eps':\n pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n elif self.gaussian_parametrization == 'x0':\n pred_xstart = model_output\n else:\n raise NotImplementedError\n \n model_mean, _, _ = self.gaussian_q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n ), f'{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}'\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def _vb_terms_bpd(\n self, model_output, x_start, x_t, t, clip_denoised=False, model_kwargs=None\n ):\n true_mean, _, true_log_variance_clipped = self.gaussian_q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )\n out = self.gaussian_p_mean_variance(\n model_output, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs\n )\n kl = normal_kl(\n true_mean, true_log_variance_clipped, out[\"mean\"], out[\"log_variance\"]\n )\n kl = mean_flat(kl) / np.log(2.0)\n\n decoder_nll = -discretized_gaussian_log_likelihood(\n x_start, means=out[\"mean\"], log_scales=0.5 * out[\"log_variance\"]\n )\n assert decoder_nll.shape == x_start.shape\n decoder_nll = mean_flat(decoder_nll) / np.log(2.0)\n\n # At the first timestep return the decoder NLL,\n # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))\n output = torch.where((t == 0), decoder_nll, kl)\n return {\"output\": output, \"pred_xstart\": out[\"pred_xstart\"], \"out_mean\": out[\"mean\"], \"true_mean\": true_mean}\n \n def _prior_gaussian(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n\n This term can't be optimized, as it only depends on the encoder.\n\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n \n def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None):\n if model_kwargs is None:\n model_kwargs = {}\n\n terms = {}\n if self.gaussian_loss_type == 'mse':\n terms[\"loss\"] = mean_flat((noise - model_out) ** 2)\n elif self.gaussian_loss_type == 'kl':\n terms[\"loss\"] = self._vb_terms_bpd(\n model_output=model_out,\n x_start=x_start,\n x_t=x_t,\n t=t,\n clip_denoised=False,\n model_kwargs=model_kwargs,\n )[\"output\"]\n\n\n return terms['loss']\n \n def _predict_xstart_from_eps(self, x_t, t, eps):\n assert x_t.shape == eps.shape\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps\n )\n \n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def gaussian_p_sample(\n self,\n model_out,\n x,\n t,\n clip_denoised=False,\n denoised_fn=None,\n model_kwargs=None,\n ):\n out = self.gaussian_p_mean_variance(\n model_out,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = torch.randn_like(x)\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * torch.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n # Multinomial part\n\n def multinomial_kl(self, log_prob1, log_prob2):\n\n kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1)\n\n return kl\n\n def q_pred_one_timestep(self, log_x_t, t):\n log_alpha_t = extract(self.log_alpha, t, log_x_t.shape)\n log_1_min_alpha_t = extract(self.log_1_min_alpha, t, log_x_t.shape)\n\n # alpha_t * E[xt] + (1 - alpha_t) 1 / K\n log_probs = log_add_exp(\n log_x_t + log_alpha_t,\n log_1_min_alpha_t - torch.log(self.num_classes_expanded)\n )\n\n return log_probs\n\n def q_pred(self, log_x_start, t):\n log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape)\n log_1_min_cumprod_alpha = extract(self.log_1_min_cumprod_alpha, t, log_x_start.shape)\n\n log_probs = log_add_exp(\n log_x_start + log_cumprod_alpha_t,\n log_1_min_cumprod_alpha - torch.log(self.num_classes_expanded)\n )\n\n return log_probs\n\n def predict_start(self, model_out, log_x_t, t):\n\n\n assert model_out.size(0) == log_x_t.size(0)\n assert model_out.size(1) == self.num_classes.sum(), f'{model_out.size()}'\n\n log_pred = torch.empty_like(model_out)\n for ix in self.slices_for_classes:\n log_pred[:, ix] = F.log_softmax(model_out[:, ix], dim=1)\n return log_pred\n\n def q_posterior(self, log_x_start, log_x_t, t):\n # q(xt-1 | xt, x0) = q(xt | xt-1, x0) * q(xt-1 | x0) / q(xt | x0)\n # where q(xt | xt-1, x0) = q(xt | xt-1).\n\n # EV_log_qxt_x0 = self.q_pred(log_x_start, t)\n\n # print('sum exp', EV_log_qxt_x0.exp().sum(1).mean())\n # assert False\n\n # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1)\n t_minus_1 = t - 1\n # Remove negative values, will not be used anyway for final 
decoder\n t_minus_1 = torch.where(t_minus_1 < 0, torch.zeros_like(t_minus_1), t_minus_1)\n log_EV_qxtmin_x0 = self.q_pred(log_x_start, t_minus_1)\n\n num_axes = (1,) * (len(log_x_start.size()) - 1)\n t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like(log_x_start)\n log_EV_qxtmin_x0 = torch.where(t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32))\n\n # unnormed_logprobs = log_EV_qxtmin_x0 +\n # log q_pred_one_timestep(x_t, t)\n # Note: _NOT_ x_tmin1, which is how the formula is typically used!!!\n # Not very easy to see why this is true. But it is :)\n unnormed_logprobs = log_EV_qxtmin_x0 + self.q_pred_one_timestep(log_x_t, t)\n\n sliced = sliced_logsumexp(unnormed_logprobs, self.offsets)\n log_EV_xtmin_given_xt_given_xstart = unnormed_logprobs - sliced\n\n return log_EV_xtmin_given_xt_given_xstart\n\n def p_pred(self, model_out, log_x, t):\n if self.parametrization == 'x0':\n log_x_recon = self.predict_start(model_out, log_x, t=t)\n log_model_pred = self.q_posterior(\n log_x_start=log_x_recon, log_x_t=log_x, t=t)\n elif self.parametrization == 'direct':\n log_model_pred = self.predict_start(model_out, log_x, t=t)\n else:\n raise ValueError\n\n\n return log_model_pred\n\n @torch.no_grad()\n def p_sample(self, model_out, log_x, t):\n model_log_prob = self.p_pred(model_out, log_x=log_x, t=t)\n out = self.log_sample_categorical(model_log_prob)\n return out\n\n @torch.no_grad()\n def p_sample_loop(self, shape):\n device = self.log_alpha.device\n\n b = shape[0]\n # start with random normal image.\n img = torch.randn(shape, device=device)\n\n for i in reversed(range(1, self.num_timesteps)):\n img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))\n return img\n\n @torch.no_grad()\n def _sample(self, image_size, batch_size = 16):\n return self.p_sample_loop((batch_size, 3, image_size, image_size))\n\n @torch.no_grad()\n def interpolate(self, x1, x2, t = None, lam = 0.5):\n b, *_, device = *x1.shape, x1.device\n t = default(t, self.num_timesteps - 1)\n\n assert x1.shape == x2.shape\n\n t_batched = torch.stack([torch.tensor(t, device=device)] * b)\n xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2))\n\n img = (1 - lam) * xt1 + lam * xt2\n for i in reversed(range(0, t)):\n img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))\n\n return img\n\n def log_sample_categorical(self, logits):\n full_sample = []\n for i in range(len(self.num_classes)):\n one_class_logits = logits[:, self.slices_for_classes[i]]\n uniform = torch.rand_like(one_class_logits)\n gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)\n sample = (gumbel_noise + one_class_logits).argmax(dim=1)\n full_sample.append(sample.unsqueeze(1))\n full_sample = torch.cat(full_sample, dim=1)\n log_sample = index_to_log_onehot(full_sample, self.num_classes)\n return log_sample\n\n def q_sample(self, log_x_start, t):\n log_EV_qxt_x0 = self.q_pred(log_x_start, t)\n\n log_sample = self.log_sample_categorical(log_EV_qxt_x0)\n\n return log_sample\n\n def nll(self, log_x_start):\n b = log_x_start.size(0)\n device = log_x_start.device\n loss = 0\n for t in range(0, self.num_timesteps):\n t_array = (torch.ones(b, device=device) * t).long()\n\n kl = self.compute_Lt(\n log_x_start=log_x_start,\n log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array),\n t=t_array)\n\n loss += kl\n\n loss += self.kl_prior(log_x_start)\n\n return loss\n\n def kl_prior(self, log_x_start):\n b = log_x_start.size(0)\n device = log_x_start.device\n ones = 
torch.ones(b, device=device).long()\n\n log_qxT_prob = self.q_pred(log_x_start, t=(self.num_timesteps - 1) * ones)\n log_half_prob = -torch.log(self.num_classes_expanded * torch.ones_like(log_qxT_prob))\n\n kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob)\n\n return sum_except_batch(kl_prior)\n\n def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False):\n log_true_prob = self.q_posterior(\n log_x_start=log_x_start, log_x_t=log_x_t, t=t)\n log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t)\n\n if detach_mean:\n log_model_prob = log_model_prob.detach()\n\n kl = self.multinomial_kl(log_true_prob, log_model_prob)\n\n # if torch.isinf(kl).nonzero().shape[0] != 0:\n # idx = torch.isinf(kl).nonzero()[0]\n # print('KL 0 :', kl[idx])\n\n kl = sum_except_batch(kl)\n\n decoder_nll = -log_categorical(log_x_start, log_model_prob)\n decoder_nll = sum_except_batch(decoder_nll)\n\n mask = (t == torch.zeros_like(t)).float()\n loss = mask * decoder_nll + (1. - mask) * kl \n\n return loss\n\n def sample_time(self, b, device, method='uniform'):\n if method == 'importance':\n if not (self.Lt_count > 10).all():\n return self.sample_time(b, device, method='uniform')\n\n Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001\n Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1.\n pt_all = (Lt_sqrt / Lt_sqrt.sum()).to(device)\n\n t = torch.multinomial(pt_all, num_samples=b, replacement=True).to(device)\n\n pt = pt_all.gather(dim=0, index=t)\n\n return t, pt\n\n elif method == 'uniform':\n t = torch.randint(0, self.num_timesteps, (b,), device=device).long()\n\n pt = torch.ones_like(t).float() / self.num_timesteps\n return t, pt\n else:\n raise ValueError\n\n def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt):\n\n if self.multinomial_loss_type == 'vb_stochastic':\n\n kl = self.compute_Lt(\n model_out, log_x_start, log_x_t, t\n )\n kl_prior = self.kl_prior(log_x_start)\n # Upweigh loss term of the kl\n\n vb_loss = kl / pt + kl_prior\n\n\n return vb_loss\n\n elif self.multinomial_loss_type == 'vb_all':\n # Expensive, dont do it ;).\n # DEPRECATED\n return -self.nll(log_x_start)\n else:\n raise ValueError()\n\n def log_prob(self, x):\n b, device = x.size(0), x.device\n if self.training:\n return self._multinomial_loss(x)\n\n else:\n log_x_start = index_to_log_onehot(x, self.num_classes)\n\n t, pt = self.sample_time(b, device, 'importance')\n\n kl = self.compute_Lt(\n log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t)\n\n kl_prior = self.kl_prior(log_x_start)\n\n # Upweigh loss term of the kl\n loss = kl / (pt + 1e-6) + kl_prior\n\n return -loss\n \n @torch.no_grad()\n def loss_at_step_t(self, x, step):\n\n b = x.shape[0]\n device = x.device\n\n t = (torch.ones((b,)) * step).long().to(device)\n pt = torch.ones_like(t).float() / self.num_timesteps\n\n x_num = x[:, :self.num_numerical_features]\n x_cat = x[:, self.num_numerical_features:]\n \n x_num_t = x_num\n log_x_cat_t = x_cat\n if x_num.shape[1] > 0:\n noise = torch.randn_like(x_num)\n x_num_t = self.gaussian_q_sample(x_num, t, noise=noise)\n if x_cat.shape[1] > 0:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes)\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t)\n \n x_in = torch.cat([x_num_t, log_x_cat_t], dim=1)\n\n model_out = self._denoise_fn(\n x_in,\n t\n )\n\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n loss_multi = torch.zeros((1,)).float()\n loss_gauss = torch.zeros((1,)).float()\n if 
x_cat.shape[1] > 0:\n loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt) / len(self.num_classes)\n \n if x_num.shape[1] > 0:\n loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise)\n\n recon_x0_num = self.recon_x0(x_in, model_out, t)[:,:self.num_numerical_features]\n\n recon_loss = self._gaussian_loss(recon_x0_num, x_num, x_num_t, t, x_num)\n\n return loss_multi.mean(), loss_gauss.mean(), recon_loss.mean()\n \n @torch.no_grad()\n def recon_x0(self, x, model_out, t):\n # x_num = x[:, :self.num_numerical_features]\n\n x0 = extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * (x - model_out * extract(self.sqrt_one_minus_alphas_cumprod, t, x.shape))\n \n return x0\n\n def mixed_loss(self, x):\n b = x.shape[0]\n device = x.device\n t, pt = self.sample_time(b, device, 'uniform')\n\n x_num = x[:, :self.num_numerical_features]\n x_cat = x[:, self.num_numerical_features:]\n \n x_num_t = x_num\n log_x_cat_t = x_cat\n if x_num.shape[1] > 0:\n noise = torch.randn_like(x_num)\n x_num_t = self.gaussian_q_sample(x_num, t, noise=noise)\n if x_cat.shape[1] > 0:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes)\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t)\n \n x_in = torch.cat([x_num_t, log_x_cat_t], dim=1)\n\n model_out = self._denoise_fn(\n x_in,\n t\n )\n\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n loss_multi = torch.zeros((1,)).float()\n loss_gauss = torch.zeros((1,)).float()\n\n if x_cat.shape[1] > 0:\n loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt) / len(self.num_classes)\n \n if x_num.shape[1] > 0:\n loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise)\n\n\n return loss_multi.mean(), loss_gauss.mean()\n \n @torch.no_grad()\n def mixed_elbo(self, x0):\n b = x0.size(0)\n device = x0.device\n\n x_num = x0[:, :self.num_numerical_features]\n x_cat = x0[:, self.num_numerical_features:]\n has_cat = x_cat.shape[1] > 0\n if has_cat:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes).to(device)\n\n gaussian_loss = []\n xstart_mse = []\n mse = []\n mu_mse = []\n out_mean = []\n true_mean = []\n multinomial_loss = []\n for t in range(self.num_timesteps):\n t_array = (torch.ones(b, device=device) * t).long()\n noise = torch.randn_like(x_num)\n\n x_num_t = self.gaussian_q_sample(x_start=x_num, t=t_array, noise=noise)\n if has_cat:\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t_array)\n else:\n log_x_cat_t = x_cat\n\n model_out = self._denoise_fn(\n torch.cat([x_num_t, log_x_cat_t], dim=1),\n t_array\n )\n \n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n kl = torch.tensor([0.0])\n if has_cat:\n kl = self.compute_Lt(\n model_out=model_out_cat,\n log_x_start=log_x_cat,\n log_x_t=log_x_cat_t,\n t=t_array\n )\n\n out = self._vb_terms_bpd(\n model_out_num,\n x_start=x_num,\n x_t=x_num_t,\n t=t_array,\n clip_denoised=False\n )\n\n multinomial_loss.append(kl)\n gaussian_loss.append(out[\"output\"])\n xstart_mse.append(mean_flat((out[\"pred_xstart\"] - x_num) ** 2))\n # mu_mse.append(mean_flat(out[\"mean_mse\"]))\n out_mean.append(mean_flat(out[\"out_mean\"]))\n true_mean.append(mean_flat(out[\"true_mean\"]))\n\n eps = self._predict_eps_from_xstart(x_num_t, t_array, out[\"pred_xstart\"])\n mse.append(mean_flat((eps - noise) ** 2))\n\n gaussian_loss = torch.stack(gaussian_loss, dim=1)\n multinomial_loss = 
torch.stack(multinomial_loss, dim=1)\n xstart_mse = torch.stack(xstart_mse, dim=1)\n mse = torch.stack(mse, dim=1)\n # mu_mse = torch.stack(mu_mse, dim=1)\n out_mean = torch.stack(out_mean, dim=1)\n true_mean = torch.stack(true_mean, dim=1)\n\n\n\n prior_gauss = self._prior_gaussian(x_num)\n\n prior_multin = torch.tensor([0.0])\n if has_cat:\n prior_multin = self.kl_prior(log_x_cat)\n\n total_gauss = gaussian_loss.sum(dim=1) + prior_gauss\n total_multin = multinomial_loss.sum(dim=1) + prior_multin\n return {\n \"total_gaussian\": total_gauss,\n \"total_multinomial\": total_multin,\n \"losses_gaussian\": gaussian_loss,\n \"losses_multinimial\": multinomial_loss,\n \"xstart_mse\": xstart_mse,\n \"mse\": mse,\n # \"mu_mse\": mu_mse\n \"out_mean\": out_mean,\n \"true_mean\": true_mean\n }\n\n @torch.no_grad()\n def gaussian_ddim_step(\n self,\n model_out_num,\n x,\n t,\n t_prev,\n clip_denoised=False,\n denoised_fn=None,\n eta=1.0\n ):\n out = self.gaussian_p_mean_variance(\n model_out_num,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=None,\n )\n\n eps = self._predict_eps_from_xstart(x, t, out[\"pred_xstart\"])\n\n alpha_bar = extract(self.alphas_cumprod, t, x.shape)\n \n if t[0] != 0:\n alpha_bar_prev = extract(self.alphas_cumprod, t_prev, x.shape)\n else:\n alpha_bar_prev = extract(self.alphas_cumprod_prev, t_prev, x.shape)\n \n sigma = (\n eta\n * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * torch.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n\n noise = torch.randn_like(x)\n mean_pred = (\n out[\"pred_xstart\"] * torch.sqrt(alpha_bar_prev)\n + torch.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps\n )\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n sample = mean_pred + nonzero_mask * sigma * noise\n\n return sample\n\n \n @torch.no_grad()\n def gaussian_ddim_sample(\n self,\n noise,\n T,\n eta=0.0\n ):\n x = noise\n b = x.shape[0]\n device = x.device\n for t in reversed(range(T)):\n print(f'Sample timestep {t:4d}', end='\\r')\n t_array = (torch.ones(b, device=device) * t).long()\n out_num = self._denoise_fn(x, t_array)\n x = self.gaussian_ddim_step(\n out_num,\n x,\n t_array\n )\n print()\n return x\n\n\n @torch.no_grad()\n def gaussian_ddim_reverse_step(\n self,\n model_out_num,\n x,\n t,\n clip_denoised=False,\n eta=0.0\n ):\n assert eta == 0.0, \"Eta must be zero.\"\n out = self.gaussian_p_mean_variance(\n model_out_num,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=None,\n model_kwargs=None,\n )\n\n eps = (\n extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x\n - out[\"pred_xstart\"]\n ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x.shape)\n alpha_bar_next = extract(self.alphas_cumprod_next, t, x.shape)\n\n mean_pred = (\n out[\"pred_xstart\"] * torch.sqrt(alpha_bar_next)\n + torch.sqrt(1 - alpha_bar_next) * eps\n )\n\n return mean_pred\n\n @torch.no_grad()\n def gaussian_ddim_reverse_sample(\n self,\n x,\n T\n ):\n b = x.shape[0]\n device = x.device\n for t in range(T):\n print(f'Reverse timestep {t:4d}', end='\\r')\n t_array = (torch.ones(b, device=device) * t).long()\n out_num = self._denoise_fn(x, t_array)\n x = self.gaussian_ddim_reverse_step(\n out_num,\n x,\n t_array,\n eta=0.0\n )\n print()\n\n return x\n\n\n @torch.no_grad()\n def multinomial_ddim_step(\n self,\n model_out_cat,\n log_x_t,\n t,\n t_prev,\n eta=1.0\n ):\n # not ddim, essentially\n log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t, t=t)\n\n alpha_bar = extract(self.alphas_cumprod, t, 
log_x_t.shape)\n\n if t[0] != 0:\n alpha_bar_prev = extract(self.alphas_cumprod, t_prev, log_x_t.shape)\n else:\n alpha_bar_prev = extract(self.alphas_cumprod_prev, t_prev, log_x_t.shape)\n \n sigma = (\n eta\n * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * torch.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n\n coef1 = sigma\n coef2 = alpha_bar_prev - sigma * alpha_bar\n coef3 = 1 - coef1 - coef2\n\n\n log_ps = torch.stack([\n torch.log(coef1) + log_x_t,\n torch.log(coef2) + log_x0,\n torch.log(coef3) - torch.log(self.num_classes_expanded)\n ], dim=2) \n\n log_prob = torch.logsumexp(log_ps, dim=2)\n\n out = self.log_sample_categorical(log_prob)\n\n return out\n\n @torch.no_grad()\n def sample_ddim(self, num_samples, steps = 1000):\n b = num_samples\n device = self.log_alpha.device\n z_norm = torch.randn((b, self.num_numerical_features), device=device)\n\n has_cat = self.num_classes[0] != 0\n log_z = torch.zeros((b, 0), device=device).float()\n if has_cat:\n uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device)\n log_z = self.log_sample_categorical(uniform_logits)\n \n interval = 1000 // steps\n timesteps = list(np.arange(999, -1, -interval))\n\n if timesteps[-1] != 0:\n timesteps.append(0)\n \n for i in range(0, len(timesteps)):\n\n print(f'Sample timestep {i:4d}', end='\\r')\n \n t = torch.full((b,), timesteps[i], device=device, dtype=torch.long)\n \n \n if i != len(timesteps) -1 :\n t_prev = torch.full((b,), timesteps[i+1], device=device, dtype=torch.long)\n else:\n t_prev = torch.full((b,), 0, device=device, dtype=torch.long)\n \n model_out = self._denoise_fn(\n torch.cat([z_norm, log_z], dim=1).float(),\n t\n )\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t, t_prev, clip_denoised=False)\n if has_cat:\n log_z = self.multinomial_ddim_step(model_out_cat, log_z, t, t_prev)\n\n print()\n z_ohe = torch.exp(log_z).round()\n z_cat = log_z\n if has_cat:\n z_cat = ohe_to_categories(z_ohe, self.num_classes)\n sample = torch.cat([z_norm, z_cat], dim=1).cpu()\n return sample\n\n\n @torch.no_grad()\n def sample(self, num_samples):\n b = num_samples\n device = self.log_alpha.device\n z_norm = torch.randn((b, self.num_numerical_features), device=device)\n\n has_cat = self.num_classes[0] != 0\n log_z = torch.zeros((b, 0), device=device).float()\n if has_cat:\n uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device)\n print(uniform_logits.shape)\n log_z = self.log_sample_categorical(uniform_logits)\n\n for i in reversed(range(0, self.num_timesteps)):\n print(f'Sample timestep {i:4d}', end='\\r')\n t = torch.full((b,), i, device=device, dtype=torch.long)\n model_out = self._denoise_fn(\n torch.cat([z_norm, log_z], dim=1).float(),\n t\n )\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n z_norm = self.gaussian_p_sample(model_out_num, z_norm, t, clip_denoised=False)['sample']\n if has_cat:\n log_z = self.p_sample(model_out_cat, log_z, t)\n\n print()\n z_ohe = torch.exp(log_z).round()\n z_cat = log_z\n if has_cat:\n z_cat = ohe_to_categories(z_ohe, self.num_classes)\n sample = torch.cat([z_norm, z_cat], dim=1).cpu()\n return sample\n \n def sample_all(self, num_samples, batch_size, ddim=False, steps = 1000):\n if ddim:\n print('Sample using DDIM.')\n sample_fn = self.sample_ddim\n else:\n sample_fn = self.sample\n \n b = batch_size\n\n 
all_samples = []\n num_generated = 0\n while num_generated < num_samples:\n if not ddim:\n sample = sample_fn(b)\n else:\n sample = sample_fn(b, steps=steps)\n mask_nan = torch.any(sample.isnan(), dim=1)\n sample = sample[~mask_nan]\n\n all_samples.append(sample)\n \n if sample.shape[0] != b:\n raise FoundNANsError\n num_generated += sample.shape[0]\n\n x_gen = torch.cat(all_samples, dim=0)[:num_samples]\n\n return x_gen" } ]
import os
import sys
import time
import torch
import numpy as np
import pandas as pd
import src
from copy import deepcopy
from utils_train import make_dataset, update_ema
from baselines.tabddpm.models.modules import MLPDiffusion
from baselines.tabddpm.models.gaussian_multinomial_distribution import GaussianMultinomialDiffusion
12,643
self.ema_model = deepcopy(self.diffusion._denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.train_iter = train_iter self.steps = steps self.init_lr = lr self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) self.device = device self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) self.model_save_path = model_save_path columns = list(np.arange(5)*200) columns[0] = 1 columns = ['step'] + columns self.log_every = 50 self.print_every = 1 self.ema_every = 1000 def _anneal_lr(self, step): frac_done = step / self.steps lr = self.init_lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr def _run_step(self, x): x = x.to(self.device) self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict)
def get_model( model_name, model_params, n_num_features, category_sizes ): print(model_name) if model_name == 'mlp': model = MLPDiffusion(**model_params) else: raise "Unknown model!" return model class Trainer: def __init__(self, diffusion, train_iter, lr, weight_decay, steps, model_save_path, device=torch.device('cuda:1')): self.diffusion = diffusion self.ema_model = deepcopy(self.diffusion._denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.train_iter = train_iter self.steps = steps self.init_lr = lr self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) self.device = device self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) self.model_save_path = model_save_path columns = list(np.arange(5)*200) columns[0] = 1 columns = ['step'] + columns self.log_every = 50 self.print_every = 1 self.ema_every = 1000 def _anneal_lr(self, step): frac_done = step / self.steps lr = self.init_lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr def _run_step(self, x): x = x.to(self.device) self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict)
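`Trainer._anneal_lr` above decays the learning rate linearly from the initial value to zero over `steps` updates. A self-contained sketch of the same rule, assuming a throwaway parameter and optimizer purely for illustration:

import torch

init_lr, total_steps = 2e-3, 1000
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=init_lr)

def anneal_lr(step):
    # Same rule as Trainer._anneal_lr: lr falls linearly and reaches 0 at step == total_steps.
    frac_done = step / total_steps
    lr = init_lr * (1 - frac_done)
    for group in optimizer.param_groups:
        group["lr"] = lr

anneal_lr(500)
print(optimizer.param_groups[0]["lr"])   # 0.001 at the halfway point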
dataset = make_dataset(
0
2023-10-10 18:06:31+00:00
16k
ThomasMrY/DisDiff
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n \n def kl_splits(self, latent_unit=6):\n mean_splits = self.mean.chunk(latent_unit, dim=-1)\n var_splits = self.var.chunk(latent_unit, dim=-1)\n logvar_splits = self.logvar.chunk(latent_unit, dim=-1)\n kl_loss = 0\n for mean, var, logvar in zip(mean_splits, var_splits, logvar_splits):\n kl_split = 0.5 * torch.sum(torch.pow(mean, 2)\n + var - 1.0 - logvar,\n dim=-1)\n kl_loss += torch.sum(kl_split) / kl_split.shape[0]\n return kl_loss/latent_unit\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n # h = self.encoder(x)\n # h = self.quant_conv(h)\n # quant, emb_loss, info = self.quantize(h)\n # return quant, emb_loss, info\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": 
"ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n log_dict_ae[\"train/epoch_num\"] = self.current_epoch\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n 
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev, ddim_coef = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_coef', ddim_coef)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(cond = conditioning, shape=size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(x = img, c=cond, t=ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t = return_wrap(e_t, torch.full((b, 1, 1, 1), self.ddim_coef[index], device=device))\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # p.savez(\"data.npz\", z=z, x = x, xrec = xrec, x_T = x_T, time = time, alphas = alphas, alphas_prev = alphas_prev, sqrt_one_minus_alphas = sqrt_one_minus_alphas, sigmas = sigmas.cpu().numpy(),e_t = e_t)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "return_wrap", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def return_wrap(inp, coef):\n if isinstance(inp, Return):\n return inp.pred\n elif isinstance(inp, Return_grad) or isinstance(inp, Return_grad_full):\n # return inp.out_grad\n return inp.pred + coef * inp.out_grad" } ]
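The `LitEma` snippet above keeps exponential-moving-average copies of the weights and warms the decay up as min(decay, (1 + n) / (10 + n)), so early updates move the shadow weights quickly before the decay saturates at the configured value. A toy sketch of that update on a single tensor; the names are illustrative and this is not the Lightning module itself.

import torch

def ema_update(shadow, param, num_updates, decay_cfg=0.9999):
    # Warm-up: for small num_updates the effective decay is far below decay_cfg,
    # matching LitEma's min(decay, (1 + n) / (10 + n)).
    decay = min(decay_cfg, (1 + num_updates) / (10 + num_updates))
    shadow.sub_((1.0 - decay) * (shadow - param))

shadow = torch.zeros(3)   # EMA copy of a parameter
param = torch.ones(3)     # current weights after some training steps
for n in range(1, 6):
    ema_update(shadow, param, n)
print(shadow)             # drifts toward 1.0 much faster than a fixed 0.9999 decay would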
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import copy
import os
import pandas as pd
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.diffusionmodules.util import return_wrap
10,855
if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(S = ddim_steps,batch_size = batch_size, shape = shape,conditioning = cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=8, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, plot_swapped_concepts = False, plot_decoded_xstart=False, plot_swapped_concepts_partial=True, **kwargs): use_ddim = ddim_steps is not None # plot_swapped_concepts = True log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if 
hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
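The `q_sample` calls in the sampling loop above (e.g. the masked in-painting path) rely on the closed-form forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, with `extract_into_tensor` (quoted in the context) broadcasting per-timestep scalars over the image dimensions. A minimal sketch with an assumed linear beta schedule; shapes and values are illustrative only.

import torch

def extract_into_tensor(a, t, x_shape):
    # Gather a[t] per batch element and reshape to (b, 1, 1, ...) for broadcasting.
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def q_sample(x_start, t, noise):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    return (extract_into_tensor(alphas_cumprod.sqrt(), t, x_start.shape) * x_start
            + extract_into_tensor((1.0 - alphas_cumprod).sqrt(), t, x_start.shape) * noise)

x0 = torch.randn(4, 3, 8, 8)
t = torch.randint(0, 1000, (4,))
x_t = q_sample(x0, t, torch.randn_like(x0))
print(x_t.shape)   # torch.Size([4, 3, 8, 8])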
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) self.ce_loss = nn.CrossEntropyLoss(reduction = "none") if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) self.register_buffer("shift_coef", - to_torch(np.sqrt(alphas)) * (1. - self.alphas_cumprod_prev) / torch.sqrt(1. - self.alphas_cumprod)) self.register_buffer("ddim_coef", -self.sqrt_one_minus_alphas_cumprod) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") self.load_epoch = sd['epoch'] self.load_step = sd["global_step"] if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.ddim_coef, t, x.shape)) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(eps_pred, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): pass # _, loss_dict_no_ema = self.shared_step(batch) # with self.ema_scope(): # _, loss_dict_ema = self.shared_step(batch) # loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} # self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) # self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, dis_loss_flag = False, detach_flag = False, train_enc_flag = False, dis_weight = 1.0, dis_loss_type = "IM", *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = 
concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key self.dis_loss_flag = dis_loss_flag self.detach_flag = detach_flag self.train_enc_flag = train_enc_flag self.dis_weight = dis_weight self.dis_loss_type = dis_loss_type try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if hasattr(self.model.diffusion_model,"scale_factor"): del self.scale_factor self.register_buffer('scale_factor', self.model.diffusion_model.scale_factor) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING Pre-Trained STD-RESCALING ###") else: del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; 
pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] else: c = None xc = None out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None, sampled_index= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 
'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not 
isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, sampled_index = sampled_index, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) @torch.no_grad() def test_step(self, batch, batch_idx): x = super().get_input(batch, self.cond_stage_key) cond = self.cond_stage_model(x) cond = torch.stack(cond.chunk(self.model.diffusion_model.latent_unit, dim = 1), dim=1) return {"cond":cond.detach().cpu()} @torch.no_grad() def test_step_end(self, batch_parts): return batch_parts["cond"] @torch.no_grad() def test_epoch_end(self, test_step_outputs): cond_cat = torch.cat(test_step_outputs, dim=0) cond_dir = os.path.join(self.logdir, "dis_repre","epoch={:06}.npz".format( self.current_epoch)) os.mkdir(os.path.join(self.logdir, "dis_repre")) np.savez(cond_dir, latents=cond_cat.numpy(), num_samples= np.array(self.global_step)) def dis_loss(self, model_forward, x_t, t, cond, sampled_concept): if not self.train_enc_flag: eval_encoder = copy.deepcopy(self.cond_stage_model) eval_encoder.requires_grad_(False) eval_encoder.eval() else: eval_encoder = self.cond_stage_model ddim_coef = extract_into_tensor(self.ddim_coef, t, x_t.shape) with torch.no_grad(): eps_hat = model_forward.pred z_start = self.predict_start_from_noise(x_t, t, eps_hat) pred_x0_t = self.differentiable_decode_first_stage(z_start, force_not_quantize=not self.detach_flag) if self.detach_flag: pred_x0_t = pred_x0_t.detach() else: pass pred_z = eval_encoder(pred_x0_t) z_parts = pred_z.chunk(self.model.diffusion_model.latent_unit, dim=1) pred_z = torch.stack(z_parts, dim=1) eps_new_hat = model_forward.pred + ddim_coef*model_forward.sub_grad z_start_new = self.predict_start_from_noise(x_t, t, eps_new_hat) pred_x0_new_t = self.differentiable_decode_first_stage(z_start_new, force_not_quantize=not self.detach_flag) if self.detach_flag: pred_x0_new_t = pred_x0_new_t.detach() else: pass pred_z_new = eval_encoder(pred_x0_new_t) z_parts = pred_z_new.chunk(self.model.diffusion_model.latent_unit, dim=1) cond = cond.chunk(self.model.diffusion_model.latent_unit, dim=1) pred_z_new = torch.stack(z_parts, dim=1) cond = torch.stack(cond, dim=1) with torch.no_grad(): norm_org = torch.norm(pred_z - cond.detach(), dim=-1) norm_Z = torch.norm(pred_z_new - cond.detach(), dim=-1) logits_deta = torch.norm(pred_z - pred_z_new, dim = -1) logits = norm_org - norm_Z 
dis_loss = self.ce_loss(logits, torch.from_numpy(sampled_concept).cuda()) dis_loss_deta = self.ce_loss(logits_deta, torch.from_numpy(sampled_concept).cuda()) if self.dis_loss_type == "IM": dis_weight = mean_flat((pred_x0_t - pred_x0_new_t.detach())**2) elif self.dis_loss_type == "Z": dis_weight = mean_flat((z_start - z_start_new.detach())**2) else: raise NotImplementedError return dis_weight * self.dis_weight * (dis_loss + dis_loss_deta) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) if self.dis_loss_flag: sampled_concept = np.random.randint(self.model.diffusion_model.latent_unit, size = x_noisy.shape[0]) model_output = self.apply_model(x_noisy, t, cond, sampled_concept = sampled_concept) dis_loss = self.dis_loss(model_output, x_noisy, t, cond, sampled_concept) else: model_output = self.apply_model(x_noisy, t, cond) eps_pred = return_wrap(model_output, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(eps_pred, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t.cpu()].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) if self.dis_loss_flag: loss = self.l_simple_weight * loss.mean() + dis_loss.mean() else: loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(eps_pred, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) loss_dict.update({f'{prefix}/epoch_num': self.current_epoch}) loss_dict.update({f'{prefix}/step_num': self.global_step}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) eps_pred = return_wrap(model_out,extract_into_tensor(self.ddim_coef, t, x.shape)) if score_corrector is not None: assert self.parameterization == "eps" eps_pred = score_corrector.modify_score(self, eps_pred, x, t, c, **corrector_kwargs) if return_codebook_ids: eps_pred, logits = eps_pred if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(S = ddim_steps,batch_size = batch_size, shape = shape,conditioning = cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=8, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, plot_swapped_concepts = False, plot_decoded_xstart=False, plot_swapped_concepts_partial=True, **kwargs): use_ddim = ddim_steps is not None # plot_swapped_concepts = True log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if 
hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
elif isimage(xc):
4
2023-10-07 09:58:07+00:00
16k
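(The four values above appear to close one corpus record: a held-out next line, an integer index, a timestamp, and a context-length bucket.) As an illustration only, the following is a minimal, self-contained Python sketch of how such a record could be reassembled into a prompt/target pair for next-line completion. Every top-level field name and value in the sketch is a placeholder or an assumption inferred from the record layout in this dump, not data copied from the corpus; only the per-snippet keys "identifier", "path", and "snippet" appear verbatim in the context entries below.

```python
# Illustrative sketch only: turning one record of a next-line-completion
# corpus like the dump above into a (prompt, target) pair. All values here
# are dummies; field names are assumptions about the record schema.
record = {
    "context": [
        {
            "identifier": "add_helper",  # dummy retrieved cross-file definition
            "path": "pkg/helpers.py",    # dummy path
            "snippet": "def add_helper(a, b):\n    return a + b",
        },
    ],
    # dummy in-file prefix ending right before the line to predict
    "cropped_code": "from pkg.helpers import add_helper\n\ndef total(xs):",
    # dummy held-out continuation
    "next_line": "    return add_helper(xs[0], xs[1])",
    # assumed to index the context snippet the target line depends on
    "gold_snippet_index": 0,
}

# Prepend the retrieved snippets to the in-file prefix; a completion model
# would be asked to continue `prompt`, and its output compared to `next_line`.
context_block = "\n\n".join(
    f"# {c['identifier']} ({c['path']})\n{c['snippet']}" for c in record["context"]
)
prompt = f"{context_block}\n\n{record['cropped_code']}"

print(prompt)
print("target:", record["next_line"])
print("gold snippet:", record["context"][record["gold_snippet_index"]]["identifier"])
```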
wiio12/LEGO-Prover
lego_prover/prover.py
[ { "identifier": "IsabelleEnv", "path": "lego_prover/env/isa_bridge.py", "snippet": "class IsabelleEnv(gym.Env):\n def __init__(\n self,\n logger=None,\n isabelle_path=\"/Users/wiio/Isabelle2022\",\n working_dir=\"miniF2F\",\n interactive_file=\"miniF2F/interactive.thy\",\n server_host=\"http://127.0.0.1\",\n server_port=8000,\n request_timeout=600,\n log_path=\"./logs\",\n ):\n self.logger = logger\n self.isabelle_path = isabelle_path\n self.working_dir = os.path.abspath(working_dir)\n self.interactive_file = os.path.abspath(interactive_file)\n self.server = f\"{server_host}:{server_port}\"\n self.server_port = server_port\n self.request_timeout = request_timeout\n self.log_path = log_path\n self.isabelle_server = self.get_isabelle_process(server_port)\n self.isabelle_server.run()\n self.stub = None\n \n # wait for isabelle server to run\n time.sleep(3)\n\n self.has_reset = False\n self.reset_options = None\n self.connected = False\n\n def get_isabelle_process(self, server_port):\n self.logger.info(f\"Starting isabelle server at port {server_port}\")\n U.f_mkdir(self.log_path, \"isabelle_server\")\n return SubprocessMonitor(\n commands=[\n \"bash\",\n \"run_server.sh\",\n str(server_port),\n ],\n name=\"isabelle_server\",\n ready_match=r\"Server is running. Press Ctrl-C to stop.\",\n log_path=U.f_join(self.log_path, \"isabelle_server\"),\n cwd=os.path.abspath(\"lego_prover/env/Portal-to-ISAbelle\"),\n server_port=server_port,\n )\n \n def step(\n self,\n code: str,\n formal_statement: str = None,\n quick_check: bool = False,\n ) -> Tuple[ObsType, SupportsFloat, bool, bool, Dict[str, Any]]:\n # if \"theory\" in code:\n # assert \"begin\" in code and \"end\" in code, \\\n # \"Outer syntax error: not complete theorem file\"\n # code = code[code.index(\"begin\") + len(\"begin\"): code.index(\"end\")].strip()\n \n # step 0: replace special token\n for symbol, value in SPECIAL_SYMBOL.items():\n if symbol in code:\n code = code.replace(symbol, value)\n\n # step 1: parse code\n parsed_code = self._get_parsed_code(code)\n\n # step 2: step by step verification\n verified_result = self._verify_step_by_step(parsed_code, quick_check=quick_check)\n if quick_check:\n return verified_result, None, None, None\n\n # step 3: post process error message\n verified_result, code, correct_partial_code, incorrect_code = self._post_process_error_msg(code, parsed_code, verified_result)\n\n # step 4: get skill code\n skill_codes = self._post_process_skill_code(correct_partial_code)\n\n # step 5: get request\n requests = self._get_request(code, skill_codes)\n \n return verified_result, code, skill_codes, requests\n\n def render(self):\n raise NotImplementedError(\"render is not implemented\")\n\n def reset(self, imports=None, hard_reset=False):\n # TODO: we fix the imports for now, we support update imports later.\n if self.stub is None or hard_reset:\n self.stub = create_stub(self.server_port)\n try:\n self.logger.info(self.stub.InitialiseIsabelle(server_pb2.IsaPath(path=self.isabelle_path)).message)\n self.logger.info(self.stub.IsabelleWorkingDirectory(server_pb2.IsaPath(path=self.working_dir)).message)\n self.logger.info(self.stub.IsabelleContext(server_pb2.IsaContext(context=self.interactive_file)).message)\n self.successful_starting = True\n except Exception as e:\n self.logger.info(\"Failure at initializing Isabelle process.\\n\"\n \"Make sure the path your provide is where the Isabelle executable is.\")\n self.logger.info(e)\n # This will reset all state\n self._post(f\"<initialise>\")\n return f\"Starting is 
successful: {self.successful_starting}\"\n else:\n self._post(\"reset_problem\")\n return f\"soft reset problem successful\"\n \n def close(self):\n if self.stub is not None:\n self._exit()\n self.isabelle_server.stop()\n return not self.connected\n \n # @func_set_timeout(1800, allowOverride=True)\n def _post(self, action):\n reset_retry_cnt = 3\n while reset_retry_cnt > 0:\n try:\n result = self.stub.IsabelleCommand(server_pb2.IsaCommand(command=action)).state\n return result\n except Exception as e:\n self.logger.info(f\"Isabelle environment exception: {e}\")\n self.isabelle_server.terminate()\n self.isabelle_server = self.get_isabelle_process(self.server_port)\n self.isabelle_server.run()\n time.sleep(3)\n self.reset(hard_reset=True)\n reset_retry_cnt -= 1\n assert False, \"Isabelle enviroment fail to reboot!\"\n \n\n def _exit(self):\n try:\n self._post('exit')\n except:\n self.logger.info(\"Post('exit') timed out, kill from system...\")\n os.system(\"ps aux | grep Isabelle | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1\")\n os.system(\"ps aux | grep poly | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1\")\n\n\n def _get_parsed_code(self, theory, tls_name='default') -> List[str]:\n steps = self._post(f\"<parse text> ${theory}\")\n steps = steps.split('<SEP>')\n steps = [s for s in steps if s.strip() != '']\n # remove weird '$' step and whitespace steps\n steps = [s for s in steps if s != '$' and s.strip() != '']\n return steps\n \n def _parse_hammer_output(self, obs):\n \"\"\"Parse the sledgehammer output, otherwise return an empty string\"\"\"\n if '<hammer>' in obs:\n output = obs.split('<hammer>')[1]\n else:\n output = ''\n return output\n\n def _verify_step_by_step(self, steps, quick_check=False):\n done = False\n reason = ''\n success = False\n step_results = []\n tls_name = 'default'\n error_step_index = None\n corrected_step = {}\n for i, step in enumerate(steps):\n try:\n step_time = time.time()\n if \"sledgehammer\" not in step:\n obs, reward, done, metadata, error = self._run_step(step, i, tls_name)\n strip_step = step.strip()\n\n if error is not None and quick_check is True:\n self._post(\"reset_problem\")\n return False\n \n # only fix \"by\" step\n if error is not None and strip_step.startswith(\"by\"):\n old_status = copy((obs, reward, done, metadata, error))\n # try correct the step with sledgehammer step\n one_line_error = error.replace('\\n', ' ')\n self.logger.info(f\"Error with step: [{step}], error: [{one_line_error}]\")\n self.logger.info(\"Trying hammer methods...\")\n obs, reward, done, metadata, error = self._run_sledgehammer(step, i, tls_name)\n if obs is not None:\n actual_step, obs = obs.split(\"<hammer>\")\n actual_step, obs = actual_step.strip(), obs.strip()\n corrected_step[i] = (step, actual_step)\n else:\n obs, reward, done, metadata, error = old_status\n else:\n if quick_check is True:\n self._post(\"reset_problem\")\n return False\n self.logger.info(\"Model use sledgehammer, Trying hammer methods...\")\n obs, reward, done, metadata, error = self._run_sledgehammer(step, i, tls_name)\n if obs is not None:\n actual_step, obs = obs.split(\"<hammer>\")\n actual_step, obs = actual_step.strip(), obs.strip()\n corrected_step[i] = (step, actual_step)\n\n step_time = time.time() - step_time\n step_results.append({\n \"index\": i,\n \"step\": step,\n \"output\": obs,\n \"step_time\": step_time,\n })\n if error is not None:\n reason = error\n success = False\n done = False\n error_step_index = i\n break\n except Exception as e:\n # Timeout - end the proof 
attempt\n success = False\n done = False\n reason = f'Python exception with error {str(e)}, at command \"{step}\" (line 1)'\n error_step_index = i\n step_results.append(dict(index=i, step=step, output=''))\n break\n\n # Change when successful\n tls_name = 'default_%d' % i\n\n if done and reward == 1.0:\n success = True\n\n result = {\n 'success': success,\n 'reason': reason,\n 'num_steps': len(steps),\n 'last_step': len(step_results),\n 'error_step_index': error_step_index,\n 'step_results': step_results,\n 'corrected_steps': corrected_step,\n }\n\n # This will reset all the problem status\n self._post(\"reset_problem\")\n if quick_check is True:\n return success\n return result\n\n def _run_sledgehammer(self, step, i, tls_name):\n # First try heuristics\n for heuristic in ['by auto', 'by simp', 'by blast', 'by fastforce', 'by force', 'by eval', 'by presburger', 'by sos', 'by arith', 'by linarith', 'by (auto simp: field_simps)', \"sledgehammer\"]:\n step_ = heuristic\n obs, reward, done, metadata, error = self._run_step(step_, i, tls_name) \n if error is None:\n if \"<hammer>\" not in obs:\n obs = '%s <hammer> %s' % (heuristic, obs)\n actual_step = obs.split(\"<hammer>\")[0].strip()\n self.logger.info(f\"Tried step: {step_}, success, replace step: [{step}] with step: [{actual_step}]\")\n return obs, reward, done, metadata, error\n else:\n if step_ == \"sledgehammer\":\n one_line_error = error.replace('\\n', ' ')\n self.logger.info(f\"Tried step: {step_} with error [{one_line_error}]\")\n if 'At command \"<malformed>\"' in one_line_error:\n error = \"Sledgehammer error (line 1): fail to finish the proof with sledgehammer\"\n return None, reward, done, metadata, error\n # Try sledgehammer\n # if error.replace('\\n', ' ').startswith(\"Step error: Outer syntax error (line 1): command expected\"):\n # error = \"Sledgehammer error (line 1): fail to finish the proof with sledgehammer\"\n return obs, reward, done, metadata, error\n\n def _run_step(self, step, i, tls_name):\n obs, reward, done, metadata = self.step_to_top_level_state(\n action=step,\n tls_name=tls_name,\n new_name='default_%d' % i\n )\n error = None\n if 'error:' in obs or 'Step error' in obs or 'Unknown error' in obs:\n error = obs\n return obs, reward, done, metadata, error\n\n def step_to_top_level_state(self, action, tls_name, new_name):\n # last_obs_string = self.stub.IsabelleCommand(server_pb2.IsaCommand(command=f\"<get state> {tls_name}\")).state\n obs_string = \"Step error\"\n try:\n obs_string = self._post(f\"<apply to top level state> {tls_name} <apply to top level state> {action} <apply to top level state> {new_name}\")\n # print(obs_string)\n except Exception as e:\n self.logger.info(\"***Something went wrong***\")\n self.logger.info(e)\n\n if \"error\" in obs_string:\n done = False\n else:\n done = self.is_finished(new_name)\n # done = True if (\"subgoal\" in last_obs_string and \"subgoal\" not in obs_string) else False\n return obs_string, self.reward(done), done, {}\n\n def reward(self, done):\n return 1 if done else 0\n\n def is_finished(self, name_of_tls):\n ret = self._post(f\"<is finished> {name_of_tls}\").strip()\n return ret.startswith(\"t\")\n \n def get_marker_statement(self, code):\n parsed = self._get_parsed_code(code)\n sl = []\n for code in parsed:\n code = code.strip()\n if code.startswith(\"lemma\") or code.startswith(\"theorem\") or code.startswith(\"fun\") or code.startswith(\"definition\"):\n sl.append(code)\n return sl[-1]\n\n \n def _post_process_error_msg(self, code, parsed_code, 
verified_result):\n old_code = copy(code)\n only_refresh_code = False\n if \"Timeout after\" in verified_result[\"reason\"]:\n verified_result[\"reason\"] = \\\n 'Step timeout error (line 1): the step takes more than 10 seconds to run. At command \"<cmd>\" (line 1)'\n if verified_result[\"success\"] is True:\n only_refresh_code = True\n elif re.search(r\"\\(line [0-9]+\\)\", verified_result[\"reason\"]) is None and \\\n re.search(r'At command \"(.?)+\"', verified_result[\"reason\"]) is None:\n self.logger.info(\"No line number or at command, skip...\")\n self.logger.info(\"The error is:\")\n self.logger.info(verified_result[\"reason\"])\n only_refresh_code = True\n \n matched_codes = []\n for ix, step in enumerate(verified_result[\"step_results\"]):\n step_code = step[\"step\"].strip()\n if step_code not in code:\n # This error is too complicated, I give up\n if len(step[\"output\"]) != 0:\n return verified_result, old_code, \"\".join(matched_codes), code\n else:\n if step_code.startswith(\"(*\"):\n start_index = code.index(\"(*\")\n self.logger.info(f\"Parsed code: {step_code}\")\n self.logger.info(f\"ori code: {code}\")\n for i in range(len(step_code)):\n if code[i+start_index] != step_code[i]:\n assert step_code[i] == \"?\"\n code = code[:i+start_index] + step_code[i] + code[i+start_index+1:]\n self.logger.info(f\"new code: {code}\")\n else:\n self.logger.info(f\"Parsed code: {step_code}\")\n self.logger.info(f\"ori code: {code}\")\n assert False, \"You should add the list!\"\n new_step = None\n if ix in verified_result[\"corrected_steps\"]:\n old_step, new_step = verified_result[\"corrected_steps\"][ix]\n assert old_step == step_code\n matched_code = code[:code.index(step_code) + len(step_code)]\n code = code[code.index(step_code) + len(step_code):]\n if new_step is not None:\n matched_code = matched_code.replace(step_code.strip(), new_step.strip())\n matched_codes.append(matched_code)\n \n correct_code = \"\".join(matched_codes)\n incorrect_code = code\n\n if not only_refresh_code:\n previous_code = \"\".join(matched_codes)\n line_number = previous_code.strip().count(\"\\n\") + 1\n\n error_msg = re.sub(r\"\\(line [0-9]+\\)\", f\"(line {line_number})\", verified_result[\"reason\"])\n error_msg = re.sub(r'At command \"(.?)+\"', f'At command \"{repr(step_code)}\"', error_msg)\n\n verified_result[\"reason\"] = error_msg\n \n new_code = \"\".join(matched_codes + [code])\n\n return verified_result, new_code, correct_code, incorrect_code\n \n def get_lemma_name(self, code):\n name = \"no_name\"\n try:\n if code.startswith('lemma'):\n name = re.findall(r\"lemma (.+):\", code)[0].strip()\n elif code.startswith('theorem'):\n name = re.findall(r\"theorem (.+):\", code)\n if len(name) == 0:\n name = \"theorem_with_no_name\"\n else:\n name = name[0].strip()\n elif code.startswith('fun') and not code.startswith('function'):\n name = re.findall(r\"fun (.+) ::\", code)[0].strip()\n elif code.startswith('function'):\n name = re.findall(r\"function (.+) ::\", code)[0].strip()\n elif code.startswith('definition'):\n name = re.findall(r\"definition (.+) ::\", code)[0].strip()\n else:\n assert False, f\"new code type: {code}\"\n except Exception as e:\n self.logger.info(f\"Error get lemma name, error: {e}, code: {code}\")\n return name\n \n def _post_process_skill_code(self, correct_partial_code):\n start_keyword = [\"lemma\", \"theorem\", \"definition\", \"fun\", \"end\"]\n \n parsed_code = self._get_parsed_code(correct_partial_code)\n all_codes = []\n current_code_set = []\n for code in parsed_code:\n 
if code.startswith(tuple(start_keyword)):\n if len(current_code_set) > 0:\n skill_code = \"\\n\".join(current_code_set)\n all_codes.append(skill_code.strip())\n current_code_set = [code]\n else:\n assert len(all_codes) == 0 or len(current_code_set) > 0\n if len(current_code_set) != 0:\n current_code_set.append(code)\n \n # remove empty code:\n tmp_code = []\n for code in all_codes:\n code = self._beautify(code, correct_partial_code)\n if len(code) == 0:\n continue\n tmp_code.append(code)\n all_codes = tmp_code\n\n # resolve dependence\n all_names = []\n for code in all_codes:\n all_names.append(self.get_lemma_name(code))\n \n name_and_codes = list(zip(all_names, all_codes))\n name_and_codes = sorted(name_and_codes, key=lambda x: len(x[0]), reverse=True)\n if len(name_and_codes) > 0:\n all_names, all_codes = list(zip(*name_and_codes))\n else:\n all_names, all_codes = [], []\n \n new_codes = []\n for ix, code in enumerate(all_codes):\n current_code = code\n escape_names = [all_names[ix]]\n while True:\n updated = False\n for jx, name in enumerate(all_names):\n if name in escape_names:\n continue\n if name in current_code:\n current_code = f\"{all_codes[jx]}\\n\\n{current_code}\"\n escape_names.append(name)\n updated = True\n if updated is False:\n break\n new_codes.append(current_code)\n \n return list(zip(all_codes, new_codes))\n\n def _beautify(self, ori_code, correct_partial_code):\n parsed_code = self._get_parsed_code(ori_code)\n if ori_code.startswith(\"lemma\") or ori_code.startswith(\"theorem\"):\n if len(parsed_code) <= 1:\n return \"\"\n else:\n return ori_code\n if parsed_code[0].strip() not in correct_partial_code:\n return ori_code\n\n formatted_code = correct_partial_code[correct_partial_code.index(parsed_code[0]):]\n matched_codes = []\n for ix, step_code in enumerate(parsed_code):\n step_code = step_code.strip()\n if step_code not in formatted_code:\n # This error is too complicated, I give up\n return ori_code\n matched_code = formatted_code[:formatted_code.index(step_code) + len(step_code)]\n formatted_code = formatted_code[formatted_code.index(step_code) + len(step_code):]\n matched_codes.append(matched_code)\n \n new_code = \"\".join(matched_codes)\n \n # remove all the comments\n # This regular expression pattern will find all comments in the Isabelle code\n pattern = re.compile(r\"\\(\\*(.*?)\\*\\)\", re.DOTALL)\n\n # Substitute found comments with an empty string\n new_code = re.sub(pattern, '', new_code).strip()\n new_code = '\\n'.join(line for line in new_code.splitlines() if line.strip())\n\n if len(self._get_parsed_code(new_code)) <= 1:\n return \"\"\n return new_code\n\n def _get_request(self, code, skill_codes):\n parsed = self._get_parsed_code(code)\n requests = []\n for line in parsed:\n if line.strip().startswith(\"lemma\"):\n requests.append(line)\n full_codes = [k[1] for k in skill_codes]\n full_code = \"\\n\\n\".join(full_codes)\n requests = list(filter(lambda x: x not in full_code, requests))\n return requests" }, { "identifier": "ActionAgent", "path": "lego_prover/agents/action.py", "snippet": "class ActionAgent:\n def __init__(\n self,\n logger=None,\n model_name=\"gpt-3.5-turbo\",\n temperature=0,\n request_timeout=120,\n ckpt_dir=\"ckpt\",\n ):\n self.logger = logger\n self.ckpt_dir = ckpt_dir\n U.f_mkdir(f\"{ckpt_dir}/action\")\n self.llm = LLMMixture(\n model_name=model_name,\n temperature=temperature,\n request_timeout=request_timeout,\n )\n\n # load decomposer examples:\n self.decomposer_examples = {}\n for file in 
os.listdir(\"data/decomposer_examples\"):\n with open(os.path.join(\"data/decomposer_examples\", file), \"r\") as f:\n text = f.read()\n self.decomposer_examples[file[:-4]] = text\n \n self.formalizer_examples = {}\n for file in os.listdir(\"data/formalizer_examples\"):\n with open(os.path.join(\"data/formalizer_examples\", file), \"r\") as f:\n text = f.read()\n self.formalizer_examples[file[:-4]] = text\n \n def retrieved_example_skills(self, retrieved_skills):\n random.shuffle(retrieved_skills)\n prompt_examples = []\n for ix, skills in enumerate(retrieved_skills):\n skill_code = skills[\"code\"]\n prompt_example = f\"\"\"###### useful skill {ix+1}: ######\n```isabelle\n{skill_code}\n```\n\"\"\"\n prompt_examples.append(prompt_example)\n \n example_programmes = \"\\n\\n\".join(prompt_examples)\n return example_programmes\n \n def decomposer(self, context):\n system_prompt_template = load_prompt(\"decomposer\")\n system_message = SystemMessage(content=system_prompt_template)\n\n human_prompt_template = load_prompt(\"decomposer_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n # post-process in-context-learning examples\n decomposer_examples = copy(self.decomposer_examples)\n if context[\"problem_name\"] in decomposer_examples:\n decomposer_examples.pop(context[\"problem_name\"])\n icl_examples = random.sample(list(decomposer_examples.values()), 3)\n icl_examples = \"\\n\\n####################\\n\\n\".join(icl_examples)\n\n context[\"informal_statement\"] = context[\"informal_statement\"].replace(\"\\n\", ' ').strip()\n context[\"informal_proof\"] = context[\"informal_proof\"].replace(\"\\n\", \" \").strip()\n\n human_message = human_prompt_template.format(\n examples=icl_examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"],\n formal_statement=context[\"formal_statement\"],\n )\n\n conversation = {\n \"sys0\": system_message.content,\n \"human0\": human_message.content,\n }\n\n self.logger.info(\n f\"****decomposer system message****\\n{system_message.content}\"\n )\n\n self.logger.info(\n f\"****decomposer human message****\\n{human_message.content}\"\n )\n\n n_retry = 3\n informal_proof = context[\"informal_proof\"]\n skill_requests = []\n while n_retry > 0:\n try:\n ai_message = self.llm([system_message, human_message], temperature=0)\n self.logger.info(\n f\"****decomposer ai message****\\n{ai_message.content}\"\n )\n conversation[f\"ai{3-n_retry}\"] = ai_message.content\n message = ai_message.content\n if \"####################\" in message:\n message = message[:message.index(\"####################\")]\n # Extracting Error Analysis content\n informal_proof = re.search(r'## Structured informal proof\\n(.*?)\\n\\n#', message, re.DOTALL).group(1).strip()\n\n # Extracting each skill request's name and its content\n skill_requests = re.findall(r\"```isabelle\\n(.*?)\\n```\", message, re.DOTALL)\n break\n except AssertionError as e:\n if \"query too long\" in str(e):\n self.logger.warn(str(e))\n break\n except Exception as e:\n self.logger.info(f\"Error occur in decomposer: {str(e)}\")\n n_retry -= 1\n examples = random.sample(list(decomposer_examples.values()), 3)\n examples = \"\\n\\n####################\\n\\n\".join(examples)\n human_message = human_prompt_template.format(\n examples=examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"],\n formal_statement=context[\"formal_statement\"],\n )\n time.sleep(5)\n ret_request = 
[]\n for skill in skill_requests:\n if \"N/A\" in skill:\n continue\n ret_request.append(skill)\n\n if len(ret_request) > 5:\n self.logger.info(f\"skill request more than 5, with len {len(ret_request)}\")\n ret_request = random.sample(ret_request, 5)\n\n return informal_proof, ret_request, conversation\n\n def critic(self, context, code_last_round=None, error_last_round=None):\n system_prompt_template = load_prompt(\"critic_request\")\n system_prompt_template = SystemMessagePromptTemplate.from_template(system_prompt_template)\n system_message = system_prompt_template.format(examples=\"\")\n\n human_prompt_template = load_prompt(\"critic_request_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n if code_last_round is None:\n code_last_round = \"No code from last round...\"\n else:\n code_last_round = code_last_round.split('\\n')\n new_code = []\n for ix, line in enumerate(code_last_round):\n line = f\"#{ix+1} \" + line\n new_code.append(line)\n code_last_round = \"\\n\".join(new_code)\n \n if error_last_round is None:\n error_last_round = \"No error from last round...\"\n\n human_message = human_prompt_template.format(\n code=code_last_round,\n error=error_last_round,\n )\n\n # self.logger.info(\n # f\"****critic agent system message****\\n{system_message.content}\"\n # )\n\n self.logger.info(\n f\"****critic agent human message****\\n{human_message.content}\"\n )\n\n n_retry = 3\n error_analysis = \"No error analysis...\"\n skill_requests = []\n while n_retry > 0:\n try:\n ai_message = self.llm([system_message, human_message])\n self.logger.info(\n f\"****critic agent ai message****\\n{ai_message.content}\"\n )\n message = ai_message.content\n # Extracting Error Analysis content\n error_analysis = re.search(r'# Error analysis:\\n(.*?)\\n\\n#', message, re.DOTALL).group(1).strip()\n\n # Extracting each skill request's name and its content\n skill_requests = re.findall(r'## Skill \\d+: ([\\w_]+)\\n```isabelle\\n(.*?)\\n```', message, re.DOTALL)\n break\n except AssertionError as e:\n if \"query too long\" in str(e):\n self.logger.warn(str(e))\n break\n except Exception as e:\n self.logger.info(f\"Error occur in auto_formal_pre: {str(e)}\")\n n_retry -= 1\n time.sleep(5)\n\n return error_analysis, skill_requests\n \n def render_formalizer_system_message(self):\n system_template = load_prompt(\"formalizer\")\n return SystemMessage(content=system_template)\n \n def render_formalizer_human_message(\n self,\n skills,\n context,\n informal_proof=None,\n n_example=3,\n ) -> HumanMessage:\n human_prompt_template = load_prompt(\"formalizer_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n formalizer_examples = copy(self.formalizer_examples)\n if context[\"problem_name\"] in formalizer_examples:\n formalizer_examples.pop(context[\"problem_name\"])\n\n examples = random.sample(list(formalizer_examples.values()), n_example)\n examples = \"\\n\\n####################\\n\\n\".join(examples)\n context[\"informal_statement\"] = context[\"informal_statement\"].replace(\"\\n\", ' ').strip()\n context[\"informal_proof\"] = context[\"informal_proof\"].replace(\"\\n\", \" \").strip()\n\n skills = self.retrieved_example_skills(skills)\n \n human_message = human_prompt_template.format(\n skill_examples = skills,\n examples=examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"] if informal_proof is None else informal_proof,\n 
formal_statement=context[\"formal_statement\"],\n )\n\n return human_message\n\n\n def render_human_message(\n self, \n context, \n code=None,\n error=None,\n error_analysis=None,\n informal_proof=None,\n ) -> HumanMessage:\n human_prompt_template = load_prompt(\"auto_formal2_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n if code is None:\n code = \"No code from last round...\"\n else:\n code = code.split('\\n')\n new_code = []\n for ix, line in enumerate(code):\n line = f\"#{ix+1} \" + line\n new_code.append(line)\n code = \"\\n\".join(new_code)\n \n if error is None:\n error = \"No error from last round...\"\n if error_analysis is None:\n error_analysis = \"No analysis...\"\n\n human_message = human_prompt_template.format(\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"] if informal_proof is None else informal_proof,\n formal_statement=context[\"formal_statement\"],\n code_last_round=code,\n error_last_round=error,\n error_analysis=error_analysis,\n )\n\n return human_message\n\n def process_ai_message(self, message, context):\n assert isinstance(message, AIMessage)\n\n retry = 3\n error = None\n while retry > 0:\n try:\n code_pattern = re.compile(r\"```(?:[i|I]sabelle)(.*?)```\", re.DOTALL)\n text = message.content[message.content.index(\"# Formalized Code\"):]\n code = \"\\n\".join(code_pattern.findall(text)).strip()\n return code\n except Exception as e:\n retry -= 1\n error = e\n time.sleep(1)\n self.logger.info(f\"Error parsing action response (before program execution): {error}\")\n return False" }, { "identifier": "CurriculumAgent", "path": "lego_prover/agents/curriculum.py", "snippet": "class CurriculumAgent:\n def __init__(\n self,\n logger=None,\n ckpt_dir=\"ckpt\",\n resume=False,\n miniF2F_tasks : mp.Queue = None,\n curriculum_task_type : str = \"simple_curriculum\",\n curriculum_agent_lock = U.WithEmpty()\n ):\n self.logger=logger\n self.miniF2F_tasks = miniF2F_tasks\n self.curriculum_task_type = curriculum_task_type\n self.curriculum_agent_lock = curriculum_agent_lock\n self.ckpt_dir = ckpt_dir\n U.f_mkdir(f\"{ckpt_dir}/curriculum/vectordb\")\n if resume:\n self.logger.info(f\"Loading Curriculum Agent from {ckpt_dir}/curriculum\")\n self.sync_checkpoint()\n else:\n self.completed_tasks = []\n self.failed_tasks = []\n \n def sync_checkpoint(self,):\n if os.path.exists(f\"{self.ckpt_dir}/curriculum/completed_tasks.json\"):\n self.completed_tasks = U.load_json(f\"{self.ckpt_dir}/curriculum/completed_tasks.json\")\n if os.path.exists(f\"{self.ckpt_dir}/curriculum/failed_tasks.json\"):\n self.failed_tasks = U.load_json(f\"{self.ckpt_dir}/curriculum/failed_tasks.json\")\n\n @property\n def easy_to_hard_curriculum(self):\n result = []\n for name in os.listdir(\"data/full_data/valid\"):\n path = os.path.join(\"data/full_data/valid\", name)\n context = U.load_json(path)\n result.append((path, len(context[\"informal_proof\"])))\n result = sorted(result, key=lambda x: x[1])\n result = [x[0] for x in result]\n return result\n\n @property\n def progress(self):\n return len(self.completed_tasks)\n\n def propose_next_task(self, max_retries=5, idx=None):\n if self.curriculum_task_type == \"example\":\n filename = os.listdir(\"data/examples\")[self.progress]\n task = filename[:-5]\n context = load_context(problem_name=os.path.join(\"data/examples\", filename))\n return task, context\n elif self.curriculum_task_type == \"simple_curriculum\":\n assert idx is not None\n file_path = 
self.easy_to_hard_curriculum[idx]\n task = file_path\n context = load_context(file_path)\n return task, context\n elif self.curriculum_task_type == \"queue_curriculum\":\n while True:\n if self.miniF2F_tasks.qsize() == 0:\n return \"\", None\n file_path = self.miniF2F_tasks.get()\n context = load_context(file_path)\n if file_path not in self.completed_tasks:\n break\n return file_path, context\n else:\n raise NotImplementedError\n\n def get_task_retry_count(self, task):\n cnt = 0\n for t in self.failed_tasks:\n if t == task:\n cnt += 1\n return cnt\n\n def propose_next_manual_task(self):\n confirmed = False\n task = \"\"\n while not confirmed:\n task = input(\"Enter task: \")\n print(f\"Task: {task}\")\n confirmed = input(\"Confirm? (y/n)\").lower() in [\"y\", \"\"]\n context = load_context(task)\n return task, context\n\n def update_exploration_progress(self, info):\n with self.curriculum_agent_lock:\n self.sync_checkpoint()\n\n task = info[\"task\"]\n if info[\"success\"]:\n self.logger.info(f\"Completed task {task}.\")\n self.completed_tasks.append(task)\n else:\n self.logger.info(\n f\"Failed to complete task {task}. Skipping to next task.\"\n )\n self.failed_tasks.append(task)\n\n # clean up tasks and dump to disk\n self.clean_up_tasks()\n\n def clean_up_tasks(self):\n updated_completed_tasks = []\n # record repeated failed tasks\n updated_failed_tasks = self.failed_tasks\n # dedup but keep order\n for task in self.completed_tasks:\n if task not in updated_completed_tasks:\n updated_completed_tasks.append(task)\n\n # remove completed tasks from failed tasks\n for task in updated_completed_tasks:\n while task in updated_failed_tasks:\n updated_failed_tasks.remove(task)\n\n self.completed_tasks = updated_completed_tasks\n self.failed_tasks = updated_failed_tasks\n\n # dump to json\n U.dump_json(\n self.completed_tasks, f\"{self.ckpt_dir}/curriculum/completed_tasks.json\"\n )\n U.dump_json(self.failed_tasks, f\"{self.ckpt_dir}/curriculum/failed_tasks.json\")" }, { "identifier": "SkillManager", "path": "lego_prover/agents/skill.py", "snippet": "class SkillManager:\n def __init__(\n self,\n rank = None,\n logger = None,\n ckpt_dir=\"ckpt\",\n skill_manager_lock=U.WithEmpty(),\n chroma_bridge: ChromaBridge = None\n ):\n self.rank = rank\n self.logger = logger\n self.skill_manager_lock = skill_manager_lock\n self.chroma_bridge = chroma_bridge\n U.f_mkdir(f\"{ckpt_dir}/skill/code\")\n U.f_mkdir(f\"{ckpt_dir}/skill/history_problem\")\n U.f_mkdir(f\"{ckpt_dir}/skill/requests\")\n U.f_mkdir(f\"{ckpt_dir}/skill/description\")\n U.f_mkdir(f\"{ckpt_dir}/skill/vectordb\")\n self.ckpt_dir = ckpt_dir\n self.encoder = tiktoken.encoding_for_model(\"gpt-4\")\n with self.skill_manager_lock:\n self.sync_checkpoint()\n \n def sync_checkpoint(self):\n if os.path.exists(f\"{self.ckpt_dir}/skill/skills.json\"):\n self.skills = U.load_json(f\"{self.ckpt_dir}/skill/skills.json\")\n else:\n self.skills = {}\n if os.path.exists(f\"{self.ckpt_dir}/skill/codes.json\"):\n self.codes = U.load_json(f\"{self.ckpt_dir}/skill/codes.json\")\n else:\n self.codes = {}\n if os.path.exists(f\"{self.ckpt_dir}/skill/skill_request.json\"):\n self.skill_requests = U.load_json(f\"{self.ckpt_dir}/skill/skill_request.json\")\n else:\n self.skill_requests = {}\n \n def add_new_problem(self, problem_name, formal_statement):\n data = (\"problem_add_text\", {\n \"add_text\": formal_statement,\n \"problem_name\": problem_name,\n })\n output = self.chroma_bridge.run_cmd(data)\n assert output[\"error\"] is None, \"error is not None\"\n 
print(output[\"output\"])\n\n def add_new_request(self, problem_name, formal_statement, init_update_count=0):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n exists_formal_statements = [value['formal_statement'] for value in self.skill_requests.values()]\n if len(get_close_matches(formal_statement, exists_formal_statements, n=1, cutoff=0.85)) != 0:\n return\n\n with self.skill_manager_lock:\n self.sync_checkpoint()\n request_name = f\"request_{len(self.skill_requests)}\"\n self.skill_requests[request_name] = {\n \"request_name\": request_name,\n \"problem_name\": problem_name,\n \"formal_statement\": formal_statement,\n \"update_count\": init_update_count,\n }\n \n\n data = (\"request_add_text\", {\n \"add_text\": formal_statement,\n \"request_name\": request_name,\n })\n \n assert self.chroma_bridge is not None\n output = self.chroma_bridge.run_cmd(data)\n if output[\"error\"] is None:\n # print(\"There are\", output[\"output\"], \"code\")\n assert output[\"output\"] == len(\n self.skill_requests\n ), (\"requestdb is not synced with skill_request.json, \"\n f\"there are {output['output']} in requestdb but {len(self.skill_requests)} in skill_request.json\")\n \n U.dump_text(\n formal_statement, f\"{self.ckpt_dir}/skill/requests/{request_name}.thy\"\n )\n U.dump_json(self.skill_requests, f\"{self.ckpt_dir}/skill/skill_request.json\")\n self.logger.info(f\"Added skill, marker:\\n ```isabelle\\n{formal_statement}```\\n\") \n\n def add_new_skill(self, skill_name, description, marker, full_code, origin=\"\", init_update_count=0):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n exists_markers = [value['marker'] for value in self.skills.values()]\n if len(self.encoder.encode(marker)) > 650:\n return\n if len(get_close_matches(marker, exists_markers, n=1, cutoff=0.85)) != 0:\n return\n\n if not bool(re.match(\"^[a-zA-Z0-9_']+$\", skill_name)):\n skill_name = f\"skill_{len(self.skills)}\"\n\n skill_name = skill_name.lower().strip().replace(\" \", \"_\")\n if skill_name in self.skills:\n i = 2\n while f\"{skill_name}V{i}\" in self.skills:\n i += 1\n skill_name = f\"{skill_name}V{i}\"\n\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n self.skills[skill_name] = {\n \"skill_name\": skill_name,\n \"marker\": marker,\n \"description\": description,\n \"full_code\": full_code,\n \"origin\": origin,\n \"update_count\": init_update_count,\n }\n\n # add_text = f\"code: {marker}, skill: {skill_name}, description: {description},\"\n add_text = marker\n \n # use chroma bridge to add skill to the chromadb\n assert self.chroma_bridge is not None\n data = (\"skill_add_text\",{\n \"skill_name\": skill_name,\n \"add_text\": add_text,\n })\n output = self.chroma_bridge.run_cmd(data)\n if output[\"error\"] is None:\n assert output[\"output\"] == len(\n self.skills\n ), (\"vectordb is not synced with skill.json\"\n f\"there are {output['output']} in skilldb but {len(self.skills)} in skills.json\")\n \n U.dump_text(\n marker, f\"{self.ckpt_dir}/skill/code/{skill_name}.thy\"\n )\n U.dump_text(\n description,\n f\"{self.ckpt_dir}/skill/description/{skill_name}.txt\",\n )\n U.dump_json(self.skills, f\"{self.ckpt_dir}/skill/skills.json\")\n self.logger.info(f\"Added skill, marker:\\n ```isabelle\\n{marker}```\\nfull_code:\\nisabelle\\n{full_code}\\n\")\n\n def update_count(self, skill_name):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n self.skills[skill_name][\"update_count\"] += 1\n U.dump_json(self.skills, f\"{self.ckpt_dir}/skill/skills.json\")\n \n def 
update_count_request(self, request_name):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n self.skill_requests[request_name][\"update_count\"] += 1\n U.dump_json(self.skill_requests, f\"{self.ckpt_dir}/skill/skill_request.json\")\n\n def retrieve_skills(self, query, k):\n ret_skill = []\n k = min(len(self.skills), k)\n if k != 0:\n self.logger.info(f\"Skill Manager retrieving for {k} skills\")\n with self.skill_manager_lock:\n # query = f\"informal statement: {context['informal_statement']}, informal proof: {context['informal_proof']}, formal_statement: {context['formal_statement']}\"\n data = (\"skill_query\", {\"query\": query, \"k\": k})\n outputs = self.chroma_bridge.run_cmd(data)\n ret_skill_name = []\n if outputs[\"error\"] is None:\n ret_skill_name = outputs[\"output\"]\n self.sync_checkpoint()\n self.logger.info(\n f\"Skill Manager retrieved skills for query:\\n ```\\n\"\n f\"{query}\\n```\\n\"\n f\"{', '.join(ret_skill_name)}\"\n )\n\n for skill_name in ret_skill_name:\n retrieved_skill = {\n \"skill\": skill_name,\n \"description\": self.skills[skill_name][\"description\"],\n \"code\": self.skills[skill_name][\"full_code\"],\n \"marker\": self.skills[skill_name][\"marker\"],\n }\n ret_skill.append(retrieved_skill)\n return ret_skill\n\n def retrieve_skills_with_context(self, context):\n ret_skill = []\n\n k = min(len(self.skills), 6)\n if k != 0:\n self.logger.info(f\"Skill Manager retrieving for {k} skills\")\n with self.skill_manager_lock:\n query = context['formal_statement']\n data = (\"skill_query\", {\"query\": query, \"k\": k})\n outputs = self.chroma_bridge.run_cmd(data)\n ret_skill_name = []\n if outputs[\"error\"] is None:\n ret_skill_name = outputs[\"output\"]\n self.sync_checkpoint()\n self.logger.info(\n f\"Skill Manager retrieved skills for query:\\n ```\\n\"\n f\"{query}\\n```\\n\"\n f\"{', '.join(ret_skill_name)}\"\n )\n \n for skill_name in ret_skill_name:\n retrieved_skill = {\n \"skill\": skill_name,\n \"description\": self.skills[skill_name][\"description\"],\n \"code\": self.skills[skill_name][\"full_code\"],\n \"marker\": self.skills[skill_name][\"marker\"],\n }\n ret_skill.append(retrieved_skill)\n\n return ret_skill" } ]
import os
import random
import re
import time
import multiprocessing as mp
import tiktoken
import lego_prover.utils as U
import logging
from lego_prover.env.isa_bridge import IsabelleEnv
from .agents import ActionAgent
from .agents import CurriculumAgent
from .agents import SkillManager
from langchain.schema import HumanMessage
11,421
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name
self.env = IsabelleEnv(
0
2023-10-09 04:23:43+00:00
16k
YingqingHe/ScaleCrafter-ptl
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False,\n tiled=False,\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.tiled = tiled\n\n if tiled:\n from ldm.modules.diffusionmodules.model_tiled import Decoder\n else:\n from ldm.modules.diffusionmodules.model import Decoder\n\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n \n if tiled:\n self.post_quant_conv = make_conv(embed_dim, ddconfig[\"z_channels\"], tiled=tiled, kernel_size=1)\n else:\n # original post_quant_conv\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n \n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = 
ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n \n def decode_tiles(self, z):\n assert(self.tiled)\n return self.decode(z)\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n 
self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"squaredcos_cap_v2\": # used for karlo prior\n # return early\n return betas_for_alpha_bar(\n n_timestep,\n lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,\n )\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], 
*((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", device=torch.device(\"cuda\"), **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.device = device\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != self.device:\n attr = attr.to(self.device)\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n timestep_index=i,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, \n # redilation\n dilate=None, dilate_tau=None, dilate_skip=None, \n progress_dilate=False,\n dilate_cfg=None, dilate_cfg_skip=None,\n timestep_index=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n \n # redilation\n enable_dilate = (dilate is not None)\n if enable_dilate:\n if (self.ddim_timesteps.shape[0]-index) > dilate_tau:\n # close dilation in later denoising\n enable_dilate = False\n else:\n if progress_dilate:\n # adjust the dilation factor progressively\n assert(timestep_index is not None)\n dilate_list = list(range(2, math.ceil(dilate)+1))[::-1]\n n_stage = len(dilate_list)\n n_times_stage = math.ceil(dilate_tau / n_stage)\n stage_index = (timestep_index+1) // n_times_stage\n if stage_index > n_stage-1:\n stage_index = n_stage-1\n dilate = dilate_list[stage_index]\n make_dilate_model(self.model, enable_dilate=enable_dilate, dilate=dilate, nskip=dilate_skip)\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if 
use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = 
self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
12,763
if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if 
isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage_tiles(self, z, predict_cids=False, force_not_quantize=False): assert(isinstance(z, (list, tuple))) assert(predict_cids is False) z = [1. / self.scale_factor * z_ for z_ in z] return self.first_stage_model.decode_tiles(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
3
2023-10-11 10:57:55+00:00
16k
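
Aside on the record above: its code fields contain the reverse-diffusion sampler of a latent diffusion model, whose core update is the `p_sample` step. The snippet below is a minimal, self-contained sketch of that ancestral sampling update only; it is not taken from the repository above, and the names `toy_p_sample`, `mean`, `log_var` as well as the placeholder tensor values are illustrative assumptions standing in for the outputs of `p_mean_variance`.

import torch

def toy_p_sample(model_mean: torch.Tensor, model_log_variance: torch.Tensor,
                 t: torch.Tensor) -> torch.Tensor:
    # x_{t-1} = mean + [t > 0] * exp(0.5 * log_var) * noise, mirroring p_sample above.
    noise = torch.randn_like(model_mean)
    # (batch,) mask broadcast over the remaining dimensions; no noise is added at t == 0.
    nonzero_mask = (1 - (t == 0).float()).reshape(-1, *([1] * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

mean = torch.zeros(2, 3, 8, 8)             # stand-in for the predicted posterior mean
log_var = torch.full_like(mean, -4.0)      # stand-in for the posterior log-variance
t = torch.tensor([0, 5])                   # first batch element gets no noise (t == 0)
print(toy_p_sample(mean, log_var, t).shape)  # torch.Size([2, 3, 8, 8])
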
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n \"ド\",\n 
\"シ\",\n \"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n \"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n \"ø\",\n 
\"c\",\n \"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n \"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n \"g\",\n 
\"h\",\n \"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}" }, { "identifier": "KO_NAMES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}" }, { "identifier": "LANGUAGE_SUPPORTED_COUNT", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)" }, { "identifier": "TOO_SMALL_SEQUENCE", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "TOO_SMALL_SEQUENCE: int = 32" }, { "identifier": "ZH_NAMES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}" }, { "identifier": "is_suspiciously_successive_range", "path": "dist/py/Python38/site-packages/charset_normalizer/md.py", "snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) 
-> bool:\n \"\"\"\n Determine if two Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n\n return True" }, { "identifier": "CoherenceMatches", "path": "dist/py/Python38/site-packages/charset_normalizer/models.py", "snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: CharsetMatch) -> None:\n def best(self) -> 
Optional[\"CharsetMatch\"]:\n def first(self) -> Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:" }, { "identifier": "is_accentuated", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n )" }, { "identifier": "is_latin", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description" }, { "identifier": "is_multi_byte_encoding", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )" }, { "identifier": "is_unicode_range_secondary", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)" }, { "identifier": "unicode_range", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None" } ]
import importlib from codecs import IncrementalDecoder from collections import Counter from functools import lru_cache from typing import Counter as TypeCounter, Dict, List, Optional, Tuple from .constant import ( FREQUENCIES, KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES, ) from .md import is_suspiciously_successive_range from .models import CoherenceMatches from .utils import ( is_accentuated, is_latin, is_multi_byte_encoding, is_unicode_range_secondary, unicode_range, )
11643
def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values()) def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: """ This function merge results previously given by the function coherence_ratio. The return type is the same as coherence_ratio. """ per_language_ratios: Dict[str, List[float]] = {} for result in results: for sub_result in result: language, ratio = sub_result if language not in per_language_ratios: per_language_ratios[language] = [ratio] continue per_language_ratios[language].append(ratio) merge = [ ( language, round( sum(per_language_ratios[language]) / len(per_language_ratios[language]), 4, ), ) for language in per_language_ratios ] return sorted(merge, key=lambda x: x[1], reverse=True) def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: """ We shall NOT return "English—" in CoherenceMatches because it is an alternative of "English". This function only keeps the best match and remove the em-dash in it. """ index_results: Dict[str, List[float]] = dict() for result in results: language, ratio = result no_em_name: str = language.replace("—", "") if no_em_name not in index_results: index_results[no_em_name] = [] index_results[no_em_name].append(ratio) if any(len(index_results[e]) > 1 for e in index_results): filtered_results: CoherenceMatches = [] for language in index_results: filtered_results.append((language, max(index_results[language]))) return filtered_results return results @lru_cache(maxsize=2048) def coherence_ratio( decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None ) -> CoherenceMatches: """ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. A layer = Character extraction by alphabets/ranges. """ results: List[Tuple[str, float]] = [] ignore_non_latin: bool = False sufficient_match_count: int = 0 lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] if "Latin Based" in lg_inclusion_list: ignore_non_latin = True lg_inclusion_list.remove("Latin Based") for layer in alpha_unicode_split(decoded_sequence): sequence_frequencies: TypeCounter[str] = Counter(layer) most_common = sequence_frequencies.most_common() character_count: int = sum(o for c, o in most_common)
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values()) def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: """ This function merge results previously given by the function coherence_ratio. The return type is the same as coherence_ratio. """ per_language_ratios: Dict[str, List[float]] = {} for result in results: for sub_result in result: language, ratio = sub_result if language not in per_language_ratios: per_language_ratios[language] = [ratio] continue per_language_ratios[language].append(ratio) merge = [ ( language, round( sum(per_language_ratios[language]) / len(per_language_ratios[language]), 4, ), ) for language in per_language_ratios ] return sorted(merge, key=lambda x: x[1], reverse=True) def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: """ We shall NOT return "English—" in CoherenceMatches because it is an alternative of "English". This function only keeps the best match and remove the em-dash in it. """ index_results: Dict[str, List[float]] = dict() for result in results: language, ratio = result no_em_name: str = language.replace("—", "") if no_em_name not in index_results: index_results[no_em_name] = [] index_results[no_em_name].append(ratio) if any(len(index_results[e]) > 1 for e in index_results): filtered_results: CoherenceMatches = [] for language in index_results: filtered_results.append((language, max(index_results[language]))) return filtered_results return results @lru_cache(maxsize=2048) def coherence_ratio( decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None ) -> CoherenceMatches: """ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. A layer = Character extraction by alphabets/ranges. """ results: List[Tuple[str, float]] = [] ignore_non_latin: bool = False sufficient_match_count: int = 0 lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] if "Latin Based" in lg_inclusion_list: ignore_non_latin = True lg_inclusion_list.remove("Latin Based") for layer in alpha_unicode_split(decoded_sequence): sequence_frequencies: TypeCounter[str] = Counter(layer) most_common = sequence_frequencies.most_common() character_count: int = sum(o for c, o in most_common)
if character_count <= TOO_SMALL_SEQUENCE:
3
2023-10-11 09:08:57+00:00
16k
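The record above closes with its gold continuation (if character_count <= TOO_SMALL_SEQUENCE:), the index of the context snippet deemed relevant, a creation timestamp, and the context-length bucket (16k). As a minimal sketch of how such a record might be scored, the hypothetical helper below compares a model's predicted next line against the record's next_line value by whitespace-insensitive exact match; the field names mirror the records in this dump, while the function name, the example dictionary, and the prediction string are illustrative assumptions only.

def score_next_line(record: dict, predicted_line: str) -> bool:
    # Hypothetical scorer: exact match between the model prediction and the
    # gold next_line, ignoring leading/trailing whitespace on both sides.
    return predicted_line.strip() == record["next_line"].strip()

# Illustrative usage with values copied from the record above.
record = {
    "next_line": "if character_count <= TOO_SMALL_SEQUENCE:",
    "gold_snippet_index": 3,
    "level": "16k",
}
print(score_next_line(record, "if character_count <= TOO_SMALL_SEQUENCE:"))  # True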
MTgeophysics/mtpy-v2
mtpy/modeling/occam1d/startup.py
[ { "identifier": "Occam1DData", "path": "mtpy/modeling/occam1d/data.py", "snippet": "class Occam1DData(object):\n \"\"\"\n reads and writes occam 1D data files\n\n ===================== =====================================================\n Attributes Description\n ===================== =====================================================\n _data_fn basename of data file *default* is Occam1DDataFile\n _header_line header line for description of data columns\n _ss string spacing *default* is 6*' '\n _string_fmt format of data *default* is '+.6e'\n data array of data\n data_fn full path to data file\n freq frequency array of data\n mode mode to invert for [ 'TE' | 'TM' | 'det' ]\n phase_te array of TE phase\n phase_tm array of TM phase\n res_te array of TE apparent resistivity\n res_tm array of TM apparent resistivity\n resp_fn full path to response file\n save_path path to save files to\n ===================== =====================================================\n\n\n ===================== =====================================================\n Methods Description\n ===================== =====================================================\n write_data_file write an Occam1D data file\n read_data_file read an Occam1D data file\n read_resp_file read a .resp file output by Occam1D\n ===================== =====================================================\n\n :Example: ::\n\n >>> import mtpy.modeling.occam1d as occam1d\n >>> #--> make a data file for TE mode\n >>> d1 = occam1d.Data()\n >>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10, phase_err=2.5,\n >>> ... save_path=r\"/home/occam1d/mt01/TE\", mode='TE')\n\n \"\"\"\n\n def __init__(self, mt_dataframe, **kwargs):\n self.logger = logger\n self.mt_dataframe = MTDataFrame(data=mt_dataframe)\n\n self._string_fmt = \"+.6e\"\n self._ss = 6 * \" \"\n self._acceptable_modes = [\"te\" \"tm\", \"det\", \"detz\", \"tez\", \"tmz\"]\n self._data_fn = \"Occam1d_DataFile\"\n self._header_line = \"!{0}\\n\".format(\n \" \".join([\"Type\", \"Freq#\", \"TX#\", \"Rx#\", \"Data\", \"Std_Error\"])\n )\n self.mode = \"det\"\n self.data = None\n self.rotation_angle = 0\n\n self.data_1 = None\n self.data_1_error = None\n self.data_2 = None\n self.data_2_error = None\n\n self.save_path = Path().cwd()\n self.data_fn = self.save_path.joinpath(self._data_fn)\n\n for key in list(kwargs.keys()):\n setattr(self, key, kwargs[key])\n\n def __str__(self):\n lines = [\"Occam 1D Data:\"]\n lines.append(f\"\\tMode: {self.mode}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def mode_01(self):\n if self.mode == \"te\":\n return \"RhoZxy\"\n elif self.mode == \"tm\":\n return \"RhoZyx\"\n elif self.mode == \"det\":\n return \"RhoZxy\"\n elif self.mode == \"detz\":\n return \"RealZxy\"\n elif self.mode == \"tez\":\n return \"RealZxy\"\n elif self.mode == \"tmz\":\n return \"RealZyx\"\n\n @property\n def mode_02(self):\n if self.mode == \"te\":\n return \"PhsZxy\"\n elif self.mode == \"tm\":\n return \"PhsZyx\"\n elif self.mode == \"det\":\n return \"PhsZxy\"\n elif self.mode == \"detz\":\n return \"ImagZxy\"\n elif self.mode == \"tez\":\n return \"ImagZxy\"\n elif self.mode == \"tmz\":\n return \"ImagZyx\"\n\n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, mode):\n\n if mode not in self._acceptable_modes:\n raise ValueError(\n f\"Mode {mode} not in accetable modes {self._acceptable_modes}\"\n )\n self._mode = mode\n\n def _get_sub_dataframe(self):\n if self._mode == 
\"te\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.res_xy,\n \"data_1_error\": self.mt_dataframe.dataframe.res_xy_model_error,\n \"data_2\": self.mt_dataframe.dataframe.phase_xy,\n \"data_2_error\": self.mt_dataframe.dataframe.phase_xy_model_error,\n }\n )\n\n elif self._mode == \"tm\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.res_yx,\n \"data_1_error\": self.mt_dataframe.dataframe.res_yx_model_error,\n \"data_2\": self.mt_dataframe.dataframe.phase_yx,\n \"data_2_error\": self.mt_dataframe.dataframe.phase_yx_model_error,\n }\n )\n\n elif self._mode == \"det\":\n z_obj = self.mt_dataframe.to_z_object()\n\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": z_obj.det.real,\n \"data_1_error\": z_obj.det_model_error,\n \"data_2\": z_obj.det.imag,\n \"data_2_error\": z_obj.det_model_error,\n }\n )\n\n elif self._mode == \"detz\":\n z_obj = self.mt_dataframe.to_z_object()\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": z_obj.det.real * np.pi * 4e-4,\n \"data_1_error\": z_obj.det_model_error * np.pi * 4e-4,\n \"data_2\": z_obj.det.imag * np.pi * 4e-4,\n \"data_2_error\": z_obj.det_model_error * np.pi * 4e-4,\n }\n )\n\n elif self.mode == \"tez\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.zxy.real\n * np.pi\n * 4e-4,\n \"data_1_error\": self.mt_dataframe.dataframe.zxy_model_error\n * np.pi\n * 4e-4,\n \"data_2\": self.mt_dataframe.dataframe.zxy.imag\n * np.pi\n * 4e-4,\n \"data_2_error\": self.mt_dataframe.dataframe.zxy_model_error\n * np.pi\n * 4e-4,\n }\n )\n\n elif self.mode == \"tmz\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.zyx.real\n * np.pi\n * 4e-4,\n \"data_1_error\": self.mt_dataframe.dataframe.zyx_model_error\n * np.pi\n * 4e-4,\n \"data_2\": self.mt_dataframe.dataframe.zyx.imag\n * np.pi\n * 4e-4,\n \"data_2_error\": self.mt_dataframe.dataframe.zyx_model_error\n * np.pi\n * 4e-4,\n }\n )\n\n sub_df = sub_df.sort_values(\"frequency\", ascending=False).reindex()\n\n return sub_df\n\n def write_data_file(\n self,\n filename,\n mode=\"det\",\n remove_outofquadrant=False,\n ):\n \"\"\"\n make1Ddatafile will write a data file for Occam1D\n\n Arguments:\n ---------\n **rp_tuple** : np.ndarray (freq, res, res_err, phase, phase_err)\n with res, phase having shape (num_freq, 2, 2).\n\n **edi_file** : string\n full path to edi file to be modeled.\n\n **save_path** : string\n path to save the file, if None set to dirname of\n station if edipath = None. Otherwise set to\n dirname of edipath.\n\n **thetar** : float\n rotation angle to rotate Z. Clockwise positive and N=0\n *default* = 0\n\n **mode** : [ 'te' | 'tm' | 'det']\n mode to model can be (*default*='both'):\n - 'te' for just TE mode (res/phase)\n - 'tm' for just TM mode (res/phase)\n - 'det' for the determinant of Z (converted to\n res/phase)\n add 'z' to any of these options to model\n impedance tensor values instead of res/phase\n\n\n **res_err** : float\n errorbar for resistivity values. Can be set to (\n *default* = 'data'):\n\n - 'data' for errorbars from the data\n - percent number ex. 10 for ten percent\n\n **phase_err** : float\n errorbar for phase values. 
Can be set to (\n *default* = 'data'):\n\n - 'data' for errorbars from the data\n - percent number ex. 10 for ten percent\n **res_errorfloor**: float\n error floor for resistivity values\n in percent\n **phase_errorfloor**: float\n error floor for phase in degrees\n **remove_outofquadrant**: True/False; option to remove the resistivity and\n phase values for points with phases out\n of the 1st/3rd quadrant (occam requires\n 0 < phase < 90 degrees; phases in the 3rd\n quadrant are shifted to the first by\n adding 180 degrees)\n\n :Example: ::\n\n >>> import mtpy.modeling.occam1d as occam1d\n >>> #--> make a data file\n >>> d1 = occam1d.Data()\n >>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10,\n >>> ... phase_err=2.5, mode='TE',\n >>> ... save_path=r\"/home/occam1d/mt01/TE\")\n \"\"\"\n # be sure that the input mode is not case sensitive\n self.mode = mode.lower()\n\n sub_df = self._get_sub_dataframe()\n\n if remove_outofquadrant:\n self._remove_outofquadrant_phase()\n\n # --> write file\n # make sure the savepath exists, if not create it\n self.data_fn = Path(filename)\n\n # --> write file as a list of lines\n dlines = []\n\n dlines.append(\"Format: EMData_1.1 \\n\")\n dlines.append(f\"!mode: {mode.upper()}\\n\")\n dlines.append(f\"!rotation_angle = {self.rotation_angle:.2f}\\n\")\n\n # needs a transmitter to work so put in a dummy one\n dlines.append(\"# Transmitters: 1\\n\")\n dlines.append(\"0 0 0 0 0 \\n\")\n\n nf = sub_df.frequency.size\n # write frequencies\n dlines.append(f\"# Frequencies: {nf}\\n\")\n for ff in sub_df.frequency:\n dlines.append(f\" {ff:{self._string_fmt}}\\n\")\n\n # needs a receiver to work so put in a dummy one\n dlines.append(\"# Receivers: 1 \\n\")\n dlines.append(\"0 0 0 0 0 0 \\n\")\n\n # write data\n dlines.append(f\"# Data:{self._ss}{2 * nf}\\n\")\n num_data_line = len(dlines)\n\n dlines.append(self._header_line)\n data_count = 0\n\n for row in sub_df.itertuples():\n # write lines\n dlines.append(\n self._ss.join(\n [\n self.mode_01,\n str(row.Index + 1),\n \"0\",\n \"1\",\n f\"{row.data_1:{self._string_fmt}}\",\n f\"{row.data_1_error:{self._string_fmt}}\\n\",\n ]\n )\n )\n data_count += 1\n dlines.append(\n self._ss.join(\n [\n self.mode_02,\n str(row.Index + 1),\n \"0\",\n \"1\",\n f\"{row.data_2:{self._string_fmt}}\",\n f\"{row.data_2_error:{self._string_fmt}}\\n\",\n ]\n )\n )\n data_count += 1\n\n # --> write file\n dlines[num_data_line - 1] = f\"# Data:{self._ss}{data_count}\\n\"\n\n with open(self.data_fn, \"w\") as dfid:\n dfid.writelines(dlines)\n\n self.logger.info(f\"Wrote Data File to : {self.data_fn}\")\n\n def _remove_outofquadrant_phase(self, sub_df):\n \"\"\"\n remove out of quadrant phase from data\n \"\"\"\n # remove data points with phase out of quadrant\n if \"z\" in self.mode:\n sub_df.loc[\n (sub_df.data_1 / sub_df.data_2 > 0), [\"data_1\", \"data_2\"]\n ] = 0\n\n elif self.mode in [\"det\", \"te\", \"tm\"]:\n sub_df.loc[(sub_df.data_2 % 180 < 0), \"data_2\"] = 0\n\n return sub_df\n\n def _remove_zeros(self, sub_df):\n \"\"\"\n remove zeros from the data frame\n\n :param sub_df: DESCRIPTION\n :type sub_df: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n sub_df.loc[(sub_df != 0).any(axis=1)]\n return sub_df\n\n def read_data_file(self, data_fn):\n \"\"\"\n reads a 1D data file\n\n Arguments:\n ----------\n **data_fn** : full path to data file\n\n Returns:\n --------\n **Occam1D.rpdict** : dictionary with keys:\n\n *'freq'* : an array of frequencies with length nf\n\n *'resxy'* : TE resistivity array with shape 
(nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *'resyx'* : TM resistivity array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *'phasexy'* : TE phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *'phaseyx'* : TM phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n :Example: ::\n\n >>> old = occam1d.Data()\n >>> old.data_fn = r\"/home/Occam1D/Line1/Inv1_TE/MT01TE.dat\"\n >>> old.read_data_file()\n \"\"\"\n\n self.data_fn = Path(data_fn)\n if not self.data_fn.exists():\n raise IOError(f\"Could not find {self.data_fn}, check path\")\n\n self.save_path = self.data_fn.parent\n\n with open(self.data_fn, \"r\") as fid:\n dlines = fid.readlines()\n\n # make a dictionary of all the fields found so can put them into arrays\n finddict = {}\n for ii, dline in enumerate(dlines):\n if dline.find(\"#\") <= 3:\n fkey = dline[2:].strip().split(\":\")[0]\n fvalue = ii\n finddict[fkey] = fvalue\n\n # get number of frequencies\n nfreq = int(\n dlines[finddict[\"Frequencies\"]][2:].strip().split(\":\")[1].strip()\n )\n\n # frequency list\n freq = np.array(\n [\n float(ff)\n for ff in dlines[\n finddict[\"Frequencies\"] + 1 : finddict[\"Receivers\"]\n ]\n ]\n )\n\n # data dictionary to put things into\n # check to see if there is alread one, if not make a new one\n data = {\n \"frequency\": freq,\n \"zxy\": np.zeros(nfreq, dtype=complex),\n \"zyx\": np.zeros(nfreq, dtype=complex),\n \"res_xy\": np.zeros(nfreq),\n \"res_yx\": np.zeros(nfreq),\n \"phase_xy\": np.zeros(nfreq),\n \"phase_yx\": np.zeros(nfreq),\n \"zxy_model_error\": np.zeros(nfreq),\n \"zyx_model_error\": np.zeros(nfreq),\n \"res_xy_model_error\": np.zeros(nfreq),\n \"res_yx_model_error\": np.zeros(nfreq),\n \"phase_xy_model_error\": np.zeros(nfreq),\n \"phase_yx_model_error\": np.zeros(nfreq),\n }\n\n # get data\n for dline in dlines[finddict[\"Data\"] + 1 :]:\n if dline.find(\"!\") == 0:\n pass\n else:\n dlst = dline.strip().split()\n dlst = [dd.strip() for dd in dlst]\n if len(dlst) > 4:\n jj = int(dlst[1]) - 1\n dvalue = float(dlst[4])\n derr = float(dlst[5])\n if dlst[0] in [\"RhoZxy\", \"103\"]:\n self.mode = \"te\"\n data[\"res_xy\"][jj] = dvalue\n data[\"res_xy_model_error\"][jj] = derr\n elif dlst[0] in [\"PhsZxy\", \"104\"]:\n self.mode = \"te\"\n data[\"phase_xy\"][jj] = dvalue\n data[\"phase_xy_model_error\"][jj] = derr\n elif dlst[0] in [\"RhoZyx\", \"105\"]:\n self.mode = \"tm\"\n data[\"res_yx\"][jj] = dvalue\n data[\"res_yx_model_error\"][jj] = derr\n elif dlst[0] in [\"PhsZyx\", \"106\"]:\n self.mode = \"TM\"\n data[\"phase_yx\"][jj] = dvalue\n data[\"phase_yx_model_error\"][jj] = derr\n elif dlst[0] in [\"RealZxy\", \"113\"]:\n self.mode = \"tez\"\n data[\"zxy\"][jj] += dvalue / (np.pi * 4e-4)\n data[\"zxy_model_error\"][jj] = derr / (np.pi * 4e-4)\n elif dlst[0] in [\"ImagZxy\", \"114\"]:\n self.mode = \"tez\"\n data[\"zxy\"][jj] += 1j * dvalue / (np.pi * 4e-4)\n data[\"zxy_model_error\"][jj] = derr / (np.pi * 4e-4)\n elif dlst[0] in [\"RealZyx\", \"115\"]:\n self.mode = \"tmz\"\n data[\"zyx\"][jj] += dvalue / (np.pi * 4e-4)\n data[\"zyx_model_error\"][jj] = derr / (np.pi * 4e-4)\n elif dlst[0] in [\"ImagZyx\", \"116\"]:\n self.mode = \"tmz\"\n data[\"zyx\"][jj] += 1j * dvalue / (np.pi * 4e-4)\n data[\"zyx_model_error\"][jj] = derr / (np.pi * 4e-4)\n\n df = pd.DataFrame(data)\n self.mt_dataframe = MTDataFrame(data=df)\n\n def read_resp_file(self, resp_fn=None, data_fn=None):\n \"\"\"\n read response file\n\n 
Arguments:\n ---------\n **resp_fn** : full path to response file\n\n **data_fn** : full path to data file\n\n Fills:\n --------\n\n *freq* : an array of frequencies with length nf\n\n *res_te* : TE resistivity array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *res_tm* : TM resistivity array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *phase_te* : TE phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *phase_tm* : TM phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n :Example: ::\n >>> o1d = occam1d.Data()\n >>> o1d.data_fn = r\"/home/occam1d/mt01/TE/Occam1D_DataFile_TE.dat\"\n >>> o1d.read_resp_file(r\"/home/occam1d/mt01/TE/TE_7.resp\")\n\n \"\"\"\n\n if resp_fn is not None:\n self.resp_fn = resp_fn\n if self.resp_fn is None:\n raise IOError(\"Need to input response file\")\n\n if data_fn is not None:\n self.data_fn = data_fn\n if self.data_fn is None:\n raise IOError(\"Need to input data file\")\n # --> read in data file\n self.read_data_file()\n\n # --> read response file\n dfid = open(self.resp_fn, \"r\")\n\n dlines = dfid.readlines()\n dfid.close()\n\n finddict = {}\n for ii, dline in enumerate(dlines):\n if dline.find(\"#\") <= 3:\n fkey = dline[2:].strip().split(\":\")[0]\n fvalue = ii\n finddict[fkey] = fvalue\n\n for dline in dlines[finddict[\"Data\"] + 1 :]:\n if dline.find(\"!\") == 0:\n pass\n else:\n dlst = dline.strip().split()\n if len(dlst) > 4:\n jj = int(dlst[1]) - 1\n dvalue = float(dlst[4])\n derr = float(dlst[5])\n rvalue = float(dlst[6])\n try:\n rerr = float(dlst[7])\n except ValueError:\n rerr = 1000.0\n if dlst[0] == \"RhoZxy\" or dlst[0] == \"103\":\n self.res_te[0, jj] = dvalue\n self.res_te[jj] = derr\n self.res_te[2, jj] = rvalue\n self.res_te[3, jj] = rerr\n if dlst[0] == \"PhsZxy\" or dlst[0] == \"104\":\n self.phase_te[0, jj] = dvalue\n self.phase_te[jj] = derr\n self.phase_te[2, jj] = rvalue\n self.phase_te[3, jj] = rerr\n if dlst[0] == \"RhoZyx\" or dlst[0] == \"105\":\n self.res_tm[0, jj] = dvalue\n self.res_tm[jj] = derr\n self.res_tm[2, jj] = rvalue\n self.res_tm[3, jj] = rerr\n if dlst[0] == \"PhsZyx\" or dlst[0] == \"106\":\n self.phase_tm[0, jj] = dvalue\n self.phase_tm[jj] = derr\n self.phase_tm[2, jj] = rvalue\n self.phase_tm[3, jj] = rerr\n if dlst[0] == \"RealZxy\" or dlst[0] == \"113\":\n self.mode = \"TEz\"\n self.data[\"zxy\"][0, jj] = dvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zxy\"][2, jj] = rvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][3, jj] = rerr\n if dlst[0] == \"ImagZxy\" or dlst[0] == \"114\":\n self.mode = \"TEz\"\n self.data[\"zxy\"][0, jj] += 1j * dvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zxy\"][2, jj] += 1j * rvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][3, jj] = rerr\n if dlst[0] == \"RealZyx\" or dlst[0] == \"115\":\n self.mode = \"TMz\"\n self.data[\"zyx\"][0, jj] = dvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zyx\"][2, jj] = rvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][3, jj] = rerr\n if dlst[0] == \"ImagZyx\" or dlst[0] == \"116\":\n self.mode = \"TMz\"\n self.data[\"zyx\"][0, jj] += 1j * dvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zyx\"][2, jj] += 1j * rvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][3, jj] = rerr\n if \"z\" in self.mode:\n if \"TE\" in self.mode:\n pol = \"xy\"\n elif \"TM\" in self.mode:\n pol = \"yx\"\n for ii 
in [0, 2]:\n self.data[\"res\" + pol][0 + ii] = (\n 0.2\n * np.abs(self.data[\"z\" + pol][0 + ii]) ** 2.0\n / self.freq\n )\n self.data[\"phase\" + pol][0 + ii] = np.rad2deg(\n np.arctan(\n self.data[\"z\" + pol][0 + ii].imag\n / self.data[\"z\" + pol][0 + ii].real\n )\n )\n\n self.data[\"res\" + pol][1 + ii] = (\n self.data[\"res\" + pol][0 + ii]\n * self.data[\"z\" + pol][1 + ii].real\n / np.abs(self.data[\"z\" + pol][0 + ii])\n )\n\n for jjj in range(len(self.freq)):\n self.data[\"phase\" + pol][\n 1 + ii, jjj\n ] = mtcc.z_error2r_phi_error(\n self.data[\"z\" + pol][0 + ii, jjj].real,\n self.data[\"z\" + pol][0 + ii, jjj].imag,\n self.data[\"z\" + pol][1 + ii, jjj].real,\n )[\n 1\n ]\n if pol == \"xy\":\n self.res_te = self.data[\"resxy\"]\n self.phase_te = self.data[\"phasexy\"]\n elif pol == \"yx\":\n self.res_tm = self.data[\"resyx\"]\n self.phase_tm = self.data[\"phaseyx\"]" }, { "identifier": "Occam1DModel", "path": "mtpy/modeling/occam1d/model.py", "snippet": "class Occam1DModel(object):\n \"\"\"\n read and write the model file fo Occam1D\n\n All depth measurements are in meters.\n\n ======================== ==================================================\n Attributes Description\n ======================== ==================================================\n _model_fn basename for model file *default* is Model1D\n _ss string spacing in model file *default* is 3*' '\n _string_fmt format of model layers *default* is '.0f'\n air_layer_height height of air layer *default* is 10000\n bottom_layer bottom of the model *default* is 50000\n itdict dictionary of values from iteration file\n iter_fn full path to iteration file\n model_depth array of model depths\n model_fn full path to model file\n model_penalty array of penalties for each model layer\n model_preference_penalty array of model preference penalties for each layer\n model_prefernce array of preferences for each layer\n model_res array of resistivities for each layer\n n_layers number of layers in the model\n num_params number of parameters to invert for (n_layers+2)\n pad_z padding of model at depth *default* is 5 blocks\n save_path path to save files\n target_depth depth of target to investigate\n z1_layer depth of first layer *default* is 10\n ======================== ==================================================\n\n ======================== ==================================================\n Methods Description\n ======================== ==================================================\n write_model_file write an Occam1D model file, where depth increases\n on a logarithmic scale\n read_model_file read an Occam1D model file\n read_iter_file read an .iter file output by Occam1D\n ======================== ==================================================\n\n :Example: ::\n\n >>> #--> make a model file\n >>> m1 = occam1d.Model()\n >>> m1.write_model_file(save_path=r\"/home/occam1d/mt01/TE\")\n \"\"\"\n\n def __init__(self, model_fn=None, **kwargs):\n self.model_fn = model_fn\n self.iter_fn = None\n\n self.n_layers = kwargs.pop(\"n_layers\", 100)\n self.bottom_layer = kwargs.pop(\"bottom_layer\", None)\n self.target_depth = kwargs.pop(\"target_depth\", None)\n self.pad_z = kwargs.pop(\"pad_z\", 5)\n self.z1_layer = kwargs.pop(\"z1_layer\", 10)\n self.air_layer_height = kwargs.pop(\"zir_layer_height\", 10000)\n self._set_layerdepth_defaults()\n\n self.save_path = kwargs.pop(\"save_path\", None)\n if self.model_fn is not None and self.save_path is None:\n self.save_path = os.path.dirname(self.model_fn)\n\n self._ss = \" 
\" * 3\n self._string_fmt = \".0f\"\n self._model_fn = \"Model1D\"\n self.model_res = None\n self.model_depth = None\n self.model_penalty = None\n self.model_prefernce = None\n self.model_preference_penalty = None\n self.num_params = None\n\n def _set_layerdepth_defaults(\n self, z1_threshold=3.0, bottomlayer_threshold=2.0\n ):\n \"\"\"\n set target depth, bottom layer and z1 layer, making sure all the layers\n are consistent with each other and will work in the inversion\n (e.g. check target depth is not deeper than bottom layer)\n \"\"\"\n\n if self.target_depth is None:\n if self.bottom_layer is None:\n # if neither target_depth nor bottom_layer are set, set defaults\n self.target_depth = 10000.0\n else:\n self.target_depth = mtcc.roundsf(self.bottom_layer / 5.0, 1.0)\n\n if self.bottom_layer is None:\n self.bottom_layer = 5.0 * self.target_depth\n # if bottom layer less than a factor of 2 greater than target depth then adjust deeper\n elif (\n float(self.bottom_layer) / self.target_depth < bottomlayer_threshold\n ):\n self.bottom_layer = bottomlayer_threshold * self.target_depth\n print(\n \"bottom layer not deep enough for target depth, set to {} m\".format(\n self.bottom_layer\n )\n )\n\n if self.z1_layer is None:\n self.z1_layer = mtcc.roundsf(self.target_depth / 1000.0, 0)\n elif self.target_depth / self.z1_layer < z1_threshold:\n self.z1_layer = self.target_depth / z1_threshold\n print(\n f\"z1 layer not deep enough for target depth, set to {self.z1_layer} m\"\n )\n\n def write_model_file(self, save_path=None, **kwargs):\n \"\"\"\n Makes a 1D model file for Occam1D.\n\n Arguments:\n ----------\n\n **save_path** :path to save file to, if just path saved as\n savepath\\model.mod, if None defaults to dirpath\n\n **n_layers** : number of layers\n\n **bottom_layer** : depth of bottom layer in meters\n\n **target_depth** : depth to target under investigation\n\n **pad_z** : padding on bottom of model past target_depth\n\n **z1_layer** : depth of first layer in meters\n\n **air_layer_height** : height of air layers in meters\n\n Returns:\n --------\n\n **Occam1D.modelfn** = full path to model file\n\n ..Note: This needs to be redone.\n\n :Example: ::\n\n >>> old = occam.Occam1D()\n >>> old.make1DModelFile(savepath=r\"/home/Occam1D/Line1/Inv1_TE\",\n >>> nlayers=50,bottomlayer=10000,z1layer=50)\n >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D\n \"\"\"\n if save_path is not None:\n self.save_path = save_path\n if os.path.isdir == False:\n os.mkdir(self.save_path)\n\n self.model_fn = os.path.join(self.save_path, self._model_fn)\n\n for key in list(kwargs.keys()):\n setattr(self, key, kwargs[key])\n\n if self.model_depth is None:\n # ---------create depth layers--------------------\n log_z = np.logspace(\n np.log10(self.z1_layer),\n np.log10(\n self.target_depth\n - np.logspace(\n np.log10(self.z1_layer),\n np.log10(self.target_depth),\n num=self.n_layers,\n )[-2]\n ),\n num=self.n_layers - self.pad_z,\n )\n ztarget = np.array(\n [zz - zz % 10 ** np.floor(np.log10(zz)) for zz in log_z]\n )\n log_zpad = np.logspace(\n np.log10(self.target_depth),\n np.log10(\n self.bottom_layer\n - np.logspace(\n np.log10(self.target_depth),\n np.log10(self.bottom_layer),\n num=self.pad_z,\n )[-2]\n ),\n num=self.pad_z,\n )\n zpadding = np.array(\n [zz - zz % 10 ** np.floor(np.log10(zz)) for zz in log_zpad]\n )\n z_nodes = np.append(ztarget, zpadding)\n self.model_depth = np.array(\n [z_nodes[: ii + 1].sum() for ii in range(z_nodes.shape[0])]\n )\n else:\n self.n_layers = len(self.model_depth)\n\n 
self.num_params = self.n_layers + 2\n # make the model file\n modfid = open(self.model_fn, \"w\")\n modfid.write(\"Format: Resistivity1DMod_1.0\" + \"\\n\")\n modfid.write(f\"#LAYERS: {self.num_params}\\n\")\n modfid.write(\"!Set free values to -1 or ? \\n\")\n modfid.write(\n \"!penalize between 1 and 0,\"\n + \"0 allowing jump between layers and 1 smooth. \\n\"\n )\n modfid.write(\n \"!preference is the assumed resistivity on linear scale. \\n\"\n )\n modfid.write(\n \"!pref_penalty needs to be put if preference is not 0 [0,1]. \\n\"\n )\n modfid.write(\n \"! {0}\\n\".format(\n self._ss.join(\n [\n \"top_depth\",\n \"resistivity\",\n \"penalty\",\n \"preference\",\n \"pref_penalty\",\n ]\n )\n )\n )\n modfid.write(\n self._ss.join(\n [\n str(-self.air_layer_height),\n \"1d12\",\n \"0\",\n \"0\",\n \"0\",\n \"!air layer\",\n \"\\n\",\n ]\n )\n )\n modfid.write(\n self._ss.join(\n [\"0\", \"-1\", \"0\", \"0\", \"0\", \"!first ground layer\", \"\\n\"]\n )\n )\n for ll in self.model_depth:\n modfid.write(\n self._ss.join(\n [\n f\"{np.ceil(ll):{{1}}}\",\n \"-1\",\n \"1\",\n \"0\",\n \"0\",\n \"\\n\",\n ]\n )\n )\n\n modfid.close()\n\n print(f\"Wrote Model file: {self.model_fn}\")\n\n def read_model_file(self, model_fn=None):\n \"\"\"\n\n will read in model 1D file\n\n Arguments:\n ----------\n **modelfn** : full path to model file\n\n Fills attributes:\n --------\n\n * model_depth' : depth of model in meters\n\n * model_res : value of resisitivity\n\n * model_penalty : penalty\n\n * model_preference : preference\n\n * model_penalty_preference : preference penalty\n\n :Example: ::\n\n >>> m1 = occam1d.Model()\n >>> m1.savepath = r\"/home/Occam1D/Line1/Inv1_TE\"\n >>> m1.read_model_file()\n \"\"\"\n if model_fn is not None:\n self.model_fn = model_fn\n if self.model_fn is None:\n raise IOError(\"Need to input a model file\")\n elif os.path.isfile(self.model_fn) == False:\n raise IOError(f\"Could not find{self.model_fn}, check path\")\n\n self._model_fn = os.path.basename(self.model_fn)\n self.save_path = os.path.dirname(self.model_fn)\n mfid = open(self.model_fn, \"r\")\n mlines = mfid.readlines()\n mfid.close()\n mdict = {}\n mdict[\"nparam\"] = 0\n for key in [\"depth\", \"res\", \"pen\", \"pref\", \"prefpen\"]:\n mdict[key] = []\n\n for mm, mline in enumerate(mlines):\n if mline.find(\"!\") == 0:\n pass\n elif mline.find(\":\") >= 0:\n mlst = mline.strip().split(\":\")\n mdict[mlst[0]] = mlst[1]\n else:\n mlst = mline.strip().split()\n mdict[\"depth\"].append(float(mlst[0]))\n if mlst[1] == \"?\":\n mdict[\"res\"].append(-1)\n elif mlst[1] == \"1d12\":\n mdict[\"res\"].append(1.0e12)\n else:\n try:\n mdict[\"res\"].append(float(mlst[1]))\n except ValueError:\n mdict[\"res\"].append(-1)\n mdict[\"pen\"].append(float(mlst[2]))\n mdict[\"pref\"].append(float(mlst[3]))\n mdict[\"prefpen\"].append(float(mlst[4]))\n if mlst[1] == \"-1\" or mlst[1] == \"?\":\n mdict[\"nparam\"] += 1\n\n # make everything an array\n for key in [\"depth\", \"res\", \"pen\", \"pref\", \"prefpen\"]:\n mdict[key] = np.array(mdict[key])\n\n # create an array with empty columns to put the TE and TM models into\n mres = np.zeros((len(mdict[\"res\"]), 2))\n mres[:, 0] = mdict[\"res\"]\n mdict[\"res\"] = mres\n\n # make attributes\n self.model_res = mdict[\"res\"]\n self.model_depth = mdict[\"depth\"]\n self.model_penalty = mdict[\"pen\"]\n self.model_prefernce = mdict[\"pref\"]\n self.model_preference_penalty = mdict[\"prefpen\"]\n self.num_params = mdict[\"nparam\"]\n\n def read_iter_file(self, iter_fn=None, 
model_fn=None):\n \"\"\"\n read an 1D iteration file\n\n Arguments:\n ----------\n **imode** : mode to read from\n\n Returns:\n --------\n **Occam1D.itdict** : dictionary with keys of the header:\n\n **model_res** : fills this array with the appropriate\n values (0) for data, (1) for model\n\n :Example: ::\n\n >>> m1 = occam1d.Model()\n >>> m1.model_fn = r\"/home/occam1d/mt01/TE/Model1D\"\n >>> m1.read_iter_file(r\"/home/Occam1D/Inv1_TE/M01TE_15.iter\")\n\n \"\"\"\n\n if iter_fn is not None:\n self.iter_fn = iter_fn\n\n if self.iter_fn is None:\n raise IOError(\"Need to input iteration file\")\n\n if model_fn is not None:\n self.model_fn = model_fn\n if self.model_fn is None:\n raise IOError(\"Need to input a model file\")\n else:\n self.read_model_file()\n\n freeparams = np.where(self.model_res == -1)[0]\n\n with open(self.iter_fn, \"r\") as ifid:\n ilines = ifid.readlines()\n\n self.itdict = {}\n model = []\n for ii, iline in enumerate(ilines):\n if iline.find(\":\") >= 0:\n ikey = iline[0:20].strip()\n ivalue = iline[20:].split(\"!\")[0].strip()\n self.itdict[ikey[:-1]] = ivalue\n else:\n try:\n ilst = iline.strip().split()\n for kk in ilst:\n model.append(float(kk))\n except ValueError:\n pass\n\n # put the model values into the model dictionary into the res array\n # for easy manipulation and access.\n model = np.array(model)\n self.model_res[freeparams, 1] = model" } ]
from pathlib import Path
from mtpy.modeling.occam1d import Occam1DData, Occam1DModel
import time
import numpy as np
12060
self.data_fn = data_fn self.model_fn = model_fn if self.data_fn is not None: self.save_path = self.data_fn.parent elif self.model_fn is not None: self.save_path = self.model_fn.parent self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. *default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else:
# -*- coding: utf-8 -*- """ Created on Mon Oct 30 13:32:42 2023 @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Occam1DStartup(object): """ read and write input files for Occam1D ====================== ==================================================== Attributes Description ====================== ==================================================== _ss string spacing _startup_fn basename of startup file *default* is OccamStartup1D data_fn full path to data file debug_level debug level *default* is 1 description description of inversion for your self *default* is 1D_Occam_Inv max_iter maximum number of iterations *default* is 20 model_fn full path to model file rough_type roughness type *default* is 1 save_path full path to save files to start_iter first iteration number *default* is 0 start_lagrange starting lagrange number on log scale *default* is 5 start_misfit starting misfit value *default* is 100 start_rho starting resistivity value (halfspace) in log scale *default* is 100 start_rough starting roughness (ignored by Occam1D) *default* is 1E7 startup_fn full path to startup file target_rms target rms *default* is 1.0 ====================== ==================================================== """ def __init__(self, data_fn=None, model_fn=None, **kwargs): self.data_fn = data_fn self.model_fn = model_fn if self.data_fn is not None: self.save_path = self.data_fn.parent elif self.model_fn is not None: self.save_path = self.model_fn.parent self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. 
*default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else:
data = Occam1DData()
0
2023-10-11 22:24:50+00:00
16k
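For this record the gold continuation is data = Occam1DData() and gold_snippet_index is 0, so the first entry in the context list (the Occam1DData snippet) is the one a retriever should ideally surface before the next line is predicted. Below is a minimal sketch of looking that snippet up after loading records from a JSON Lines dump; only the field names come from the records themselves, while the file name records.jsonl and the helper gold_context_snippet are hypothetical.

import json

def gold_context_snippet(record: dict) -> dict:
    # Hypothetical lookup: return the context entry flagged by gold_snippet_index.
    return record["context"][record["gold_snippet_index"]]

# Illustrative usage, assuming one JSON object per line in a (hypothetical) records.jsonl.
with open("records.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        snippet = gold_context_snippet(record)
        print(record["repo_name"], snippet["identifier"])  # e.g. MTgeophysics/mtpy-v2 Occam1DData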
Jacoo-ai/HIC-Yolov5
detect.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location=map_location) # load\n if fuse:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n else:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse\n\n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:\n m.inplace = inplace # pytorch 1.7.0 compatibility\n if type(m) is Detect:\n if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print(f'Ensemble created with {weights}\\n')\n for k in ['names']:\n setattr(model, k, getattr(model[-1], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n return model # return ensemble" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n if not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, 'Image Not Found ' + path\n print(f'image {self.count}/{self.nf} {path}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources, 'r') as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n print(f'{i + 1}/{n}: {s}... ', end='')\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n print(f\" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n print('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n print('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] *= 0\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "apply_classifier", "path": "utils/general.py", "snippet": "def apply_classifier(x, model, img, im0):\n # Apply a second stage classifier to yolo outputs\n im0 = [im0] if isinstance(im0, np.ndarray) else im0\n for i, d in enumerate(x): # per image\n if d is not None and len(d):\n d = d.clone()\n\n # Reshape and pad cutouts\n b = xyxy2xywh(d[:, :4]) # boxes\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square\n b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad\n d[:, :4] = xywh2xyxy(b).long()\n\n # Rescale boxes from img_size to im0 size\n scale_coords(img.shape[2:], d[:, :4], im0[i].shape)\n\n # Classes\n pred_cls1 = d[:, 5].long()\n ims = []\n for j, a in enumerate(d): # per item\n cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]\n im = cv2.resize(cutout, (224, 224)) # BGR\n # cv2.imwrite('example%i.jpg' % j, cutout)\n\n im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32\n im /= 255.0 # 0 - 255 to 0.0 - 1.0\n ims.append(im)\n\n pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction\n x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections\n\n return x" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "check_suffix", "path": "utils/general.py", "snippet": "def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\n # Check file(s) for acceptable suffixes\n if file and suffix:\n if isinstance(suffix, str):\n suffix = [suffix]\n for f in file if isinstance(file, (list, tuple)) else [file]:\n assert Path(f).suffix.lower() in suffix, f\"{msg}{f} acceptable suffix is {suffix}\"" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n suffix = path.suffix\n path = path.with_suffix('')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # update path\n dir = path if path.suffix == '' else path.parent # directory\n if not dir.exists() and mkdir:\n dir.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > 
conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "save_one_box", "path": "utils/general.py", "snippet": "def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):\n # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop\n xyxy = torch.tensor(xyxy).view(-1, 4)\n b = xyxy2xywh(xyxy) # boxes\n if square:\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square\n b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad\n xyxy = xywh2xyxy(b).long()\n clip_coords(xyxy, im.shape)\n crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]\n if save:\n cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)\n return crop" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "set_logging", "path": "utils/general.py", "snippet": "def set_logging(rank=-1, verbose=True):\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output):\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):\ndef plot_lr_scheduler(optimizer, scheduler, 
epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path('')):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):" }, { "identifier": "load_classifier", "path": "utils/torch_utils.py", "snippet": "def load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = torchvision.models.__dict__[name](pretrained=True)\n\n # ResNet model properties\n # input_size = [3, 224, 224]\n # input_space = 'RGB'\n # input_range = [0, 1]\n # mean = [0.485, 0.456, 0.406]\n # std = [0.229, 0.224, 0.225]\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
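The context list above bundles the geometry helpers the detection code relies on. As a quick, self-contained illustration (the box values below are invented for the example), the xyxy2xywh conversion defined in the utils/general.py snippet maps corner coordinates to a center/size representation:

import torch

def xyxy2xywh(x):
    # same logic as the utils/general.py snippet in the context list above
    y = x.clone()
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # center x
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # center y
    y[:, 2] = x[:, 2] - x[:, 0]        # width
    y[:, 3] = x[:, 3] - x[:, 1]        # height
    return y

box = torch.tensor([[10., 20., 50., 80.]])  # illustrative x1, y1, x2, y2
print(xyxy2xywh(box))                       # tensor([[30., 50., 40., 60.]])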
import argparse import os import sys import cv2 import numpy as np import torch import torch.backends.cudnn as cudnn import onnxruntime import tensorflow as tf from pathlib import Path from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync
11,855
pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='/root/autodl-tmp/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/root/autodl-tmp/datasets/VisDrone2019/VisDrone2019-DET-train/images/0000150_01230_d_0000073.jpg', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
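In the cropped_code above, detections are written to *.txt in normalized xywh format by dividing through the gain vector gn built from the original image shape. A small sketch of that normalization step (the image size and box are made up for the example, and xyxy2xywh is assumed to be in scope, e.g. as defined in the context snippet):

import torch

im0_shape = (480, 640, 3)                      # illustrative H, W, C of the original image
gn = torch.tensor(im0_shape)[[1, 0, 1, 0]]     # tensor([640, 480, 640, 480]) -> w h w h gain
xyxy = torch.tensor([[64., 48., 192., 144.]])  # made-up box in pixels (x1, y1, x2, y2)
xywh = (xyxy2xywh(xyxy) / gn).view(-1).tolist()
print(xywh)  # [0.2, 0.2, 0.2, 0.2] -> center x/y and width/height as fractions of the image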
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5m.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model w = str(weights[0] if isinstance(weights, list) else weights) classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] check_suffix(w, suffixes) # check weights have acceptable suffix pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 if classify: # second-stage classifier modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() elif onnx: if dnn: # check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) else: check_requirements(('onnx', 'onnxruntime')) session = onnxruntime.InferenceSession(w, None) else: # TensorFlow models check_requirements(('tensorflow>=2.4.1',)) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), 
tf.nest.map_structure(x.graph.as_graph_element, outputs)) graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: model = tf.keras.models.load_model(w) elif tflite: interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: t1 = time_sync() if onnx: img = img.astype('float32') else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, 
line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='/root/autodl-tmp/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/root/autodl-tmp/datasets/VisDrone2019/VisDrone2019-DET-train/images/0000150_01230_d_0000073.jpg', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
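The final log line in the script, "Results saved to ...", wraps the save directory in ANSI escape codes via the colorstr helper from the context list. A brief usage sketch (assuming the YOLOv5 repo layout from this record is importable; the paths passed in are illustrative):

from utils.general import colorstr  # module path per the context list above; requires the repo on sys.path

print(colorstr('hello world'))              # a single argument defaults to blue + bold styling
print(colorstr('bold', 'runs/detect/exp'))  # explicit styles first, string last -> bold only
# each call returns the string wrapped in ANSI codes and terminated with '\033[0m' (reset)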
print_args(FILE.stem, opt)
11
2023-10-12 08:52:01+00:00
16k
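Taken together, the fields of this record (context snippets, import_statement, cropped_code, next_line, gold_snippet_index) look like a repository-level next-line completion example. The sketch below is only an assumption about how such a record might be assembled into a prompt/target pair; the field names match the record, but the concatenation scheme is illustrative, not prescribed by the data:

def build_example(record: dict) -> tuple:
    # join the retrieved cross-file snippets, then the in-file prefix
    context_block = "\n\n".join(item["snippet"] for item in record["context"])
    prompt = f"{context_block}\n\n{record['import_statement']}\n{record['cropped_code']}"
    target = record["next_line"]  # e.g. "print_args(FILE.stem, opt)" for this record
    return prompt, target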
OmicsML/scDiff
scdiff/model.py
[ { "identifier": "Decoder", "path": "scdiff/modules/diffusion_model/decoder.py", "snippet": "class Decoder(nn.Module):\n def __init__(self, dim, out_dim, dropout=0., norm_type=\"layernorm\", num_layers=1, cond_num_dict=None,\n cond_emb_dim=None, cond_mask_ratio=0., act=\"gelu\", out_act=None):\n super().__init__()\n if isinstance(act, str) or act is None:\n act = create_activation(act)\n if isinstance(out_act, str) or out_act is None:\n out_act = create_activation(out_act)\n\n self.cond_num_dict = cond_num_dict\n if self.cond_num_dict is not None:\n cond_emb_dim = cond_emb_dim if cond_emb_dim is not None else dim\n self.cond_embed = EmbeddingDict(cond_num_dict, cond_emb_dim, 1, 1, None, mask_ratio=cond_mask_ratio)\n else:\n self.cond_embed = None\n\n self.layers = nn.ModuleList() # FIX: use MLP layer\n for _ in range(num_layers - 1):\n self.layers.append(nn.Sequential(\n nn.Linear(dim, dim),\n act,\n create_norm(norm_type, dim),\n nn.Dropout(dropout),\n ))\n self.layers.append(nn.Sequential(nn.Linear(dim, out_dim), out_act))\n\n def forward(self, x, conditions=None):\n if self.cond_embed is not None:\n cond_emb = self.cond_embed(conditions)[0]\n x = x + cond_emb.squeeze(1)\n\n for layer in self.layers:\n x = layer(x)\n\n return x" }, { "identifier": "Embedder", "path": "scdiff/modules/diffusion_model/embedder.py", "snippet": "class Embedder(nn.Module):\n def __init__(self, pretrained_gene_list, num_hidden, norm, activation='gelu', dropout=0.,\n gene_emb=None, fix_embedding=False):\n super().__init__()\n\n self.pretrained_gene_list = pretrained_gene_list\n self.gene_index = {j: i for i, j in enumerate(pretrained_gene_list)}\n\n if gene_emb is not None:\n self.emb = nn.Parameter(gene_emb, requires_grad=not fix_embedding)\n else:\n num_genes = len(pretrained_gene_list)\n self.emb = nn.Parameter(torch.randn([num_genes, num_hidden], dtype=torch.float32) * 0.005)\n\n if fix_embedding:\n self.emb.requires_grad = False\n\n self.post_layer = nn.Sequential(\n create_activation(activation),\n create_norm(norm, num_hidden),\n nn.Dropout(dropout),\n )\n\n def forward(self, x, pe_input=None, input_gene_list=None, input_gene_idx=None):\n assert pe_input is None # FIX: deprecate pe_input\n\n if input_gene_idx is not None:\n gene_idx = input_gene_idx\n elif input_gene_list is not None:\n gene_idx = torch.tensor([self.gene_index[o] for o in input_gene_list if o in self.gene_index]).long()\n else:\n if x.shape[1] != len(self.pretrained_gene_list):\n raise ValueError(\n 'The input gene size is not the same as the pretrained gene list. 
'\n 'Please provide the input gene list.',\n )\n gene_idx = torch.arange(x.shape[1]).long()\n gene_idx = gene_idx.to(x.device)\n\n feat = F.embedding(gene_idx, self.emb)\n out = torch.sparse.mm(x, feat)\n out = self.post_layer(out)\n\n return out, gene_idx" }, { "identifier": "Encoder", "path": "scdiff/modules/diffusion_model/encoder.py", "snippet": "class Encoder(nn.Module):\n\n def __init__(\n self,\n depth,\n dim,\n num_heads,\n dim_head,\n *,\n dropout=0.,\n cond_type='crossattn',\n cond_cat_input=False,\n ):\n super().__init__()\n\n self.cond_cat_input = cond_cat_input\n\n if cond_type == 'crossattn':\n self.blocks = nn.ModuleList([\n BasicTransformerBlock(dim, num_heads, dim_head, self_attn=False, cross_attn=True, context_dim=dim,\n qkv_bias=True, dropout=dropout, final_act=None)\n for _ in range(depth)])\n elif cond_type == 'mlp':\n self.blocks = nn.ModuleList([\n ConditionEncoderWrapper(nn.Sequential(\n nn.Linear(dim, dim),\n \"gelu\",\n create_norm(\"layernorm\", dim),\n nn.Dropout(dropout),\n )) for _ in range(depth)])\n elif cond_type == 'stackffn':\n self.blocks = nn.ModuleList([\n ConditionEncoderWrapper(\n FeedForward(dim, mult=4, glu=False, dropout=dropout)\n ) for _ in range(depth)])\n else:\n raise ValueError(f'Unknown conditioning type {cond_type!r}')\n\n def forward(self, x, context_list, cond_emb_list):\n # XXX: combine context_list and cond_emb_list in conditioner?..\n x = x.unsqueeze(1)\n\n stack = zip(self.blocks, reversed(context_list), reversed(cond_emb_list))\n for i, (blk, ctxt, cond_emb) in enumerate(stack):\n full_cond_emb_list = list(filter(lambda x: x is not None, (ctxt, cond_emb)))\n if self.cond_cat_input:\n full_cond_emb_list.append(x)\n full_cond_emb = torch.cat(full_cond_emb_list, dim=1) if full_cond_emb_list else None\n\n x = blk(x, context=full_cond_emb)\n\n return x.squeeze(1)" }, { "identifier": "denoising_eval", "path": "scdiff/evaluate.py", "snippet": "@torch.inference_mode()\ndef denoising_eval(true: TensorArray, pred: TensorArray, mask: TensorArray):\n true = as_tensor(true, assert_type=True)\n pred = as_tensor(pred, assert_type=True)\n mask = as_tensor(mask, assert_type=True).bool()\n\n rmse_normed = masked_rmse(pred, true, mask).item()\n corr_normed = masked_corr(pred, true, mask).item()\n global_corr_normed = PearsonCorr1d(pred[mask], true[mask]).item()\n\n # nonzero_masked = (true > 0) * mask\n # rmse_normed_nonzeros = masked_rmse(pred, true, nonzero_masked).item()\n # corr_normed_nonzeros = masked_corr(pred, true, nonzero_masked).item()\n\n corr_normed_all = PearsonCorr(pred, true).item()\n rmse_normed_all = F.mse_loss(pred, true).sqrt().item()\n\n r = scipy.stats.linregress(pred[mask].cpu().numpy(), true[mask].cpu().numpy())[2]\n # r_all = scipy.stats.linregress(pred.ravel().cpu().numpy(), true.ravel().cpu().numpy())[2]\n\n return {\n 'denoise_rmse_normed': rmse_normed,\n 'denoise_corr_normed': corr_normed,\n 'denoise_global_corr_normed': global_corr_normed,\n 'denoise_global_r2_normed': r ** 2,\n # 'denoise_rmse_normed_nonzeros': rmse_normed_nonzeros,\n # 'denoise_corr_normed_nonzeros': corr_normed_nonzeros,\n 'denoise_rmse_normed_all': rmse_normed_all,\n 'denoise_corr_normed_all': corr_normed_all,\n # 'denoise_global_r2_normed_all': r_all ** 2,\n }" }, { "identifier": "evaluate_annotation", "path": "scdiff/evaluate.py", "snippet": "@torch.inference_mode()\ndef evaluate_annotation(\n true: TensorArray,\n pred: TensorArray,\n name: Optional[str],\n) -> Dict[str, float]:\n true_array = as_array(true, assert_type=True)\n pred_array = 
as_array(pred, assert_type=True)\n\n le = LabelEncoder()\n le.classes_ = np.array(sorted(set(np.unique(true_array).tolist() + np.unique(pred_array).tolist())))\n\n true = torch.LongTensor(le.transform(true_array))\n pred = torch.LongTensor(le.transform(pred_array))\n\n num_classes = le.classes_.size\n # num_classes = int(max(true.max(), pred.max())) + 1\n # num_unique_classes = max(true.unique().numel(), pred.unique().numel())\n # if (num_classes == num_unique_classes + 1) and (0 not in true):\n # warnings.warn(\n # \"Implicitly removing null label (index 0)\",\n # UserWarning,\n # stacklevel=2,\n # )\n # true, pred, num_classes = true - 1, pred - 1, num_classes - 1\n # elif num_classes != num_unique_classes:\n # warnings.warn(\n # f\"Number of unique classes {num_unique_classes} mismatch the \"\n # f\"number of classes inferred by max index {num_classes}\",\n # UserWarning,\n # stacklevel=2,\n # )\n\n suffix = \"\" if name is None else f\"_{name}\"\n\n out = {}\n out[f\"acc{suffix}\"] = multiclass_accuracy(true, pred, num_classes).item()\n out[f\"f1{suffix}\"] = multiclass_f1_score(true, pred, num_classes).item()\n out[f\"precision{suffix}\"] = multiclass_precision(true, pred, num_classes).item()\n out[f\"recall{suffix}\"] = multiclass_recall(true, pred, num_classes).item()\n\n return out" }, { "identifier": "perturbation_eval", "path": "scdiff/evaluate.py", "snippet": "@torch.inference_mode()\ndef perturbation_eval(\n true,\n pred,\n control,\n true_conds=None,\n gene_names=None,\n path_to_save=None,\n de_gene_idx_dict=None,\n ndde20_idx_dict=None,\n de_gene_idx=None,\n ndde20_idx=None,\n):\n if true_conds is not None: # summarize condition wise evaluation\n assert de_gene_idx_dict is not None, \"GEARS eval require DE gene index dict\"\n assert ndde20_idx_dict is not None, \"GEARS eval require top20 none dropout DE gene index dict\"\n if path_to_save:\n warnings.warn(\n f\"Cant save with multiple conds, got {path_to_save=}. 
Ignoring save option\",\n UserWarning,\n stacklevel=2,\n )\n unique_true_conds = true_conds.unique(dim=0)\n score_dict_list = []\n for cond in unique_true_conds:\n cond_ind = (true_conds == cond).all(1)\n true_sub, pred_sub = true[cond_ind], pred[cond_ind]\n cond_idx_tuple = tuple(i for i in cond.tolist() if i != -1) # XXX: specificially designed for GEARS\n score_dict_list.append(perturbation_eval(true_sub, pred_sub, control, gene_names=gene_names,\n de_gene_idx=de_gene_idx_dict[cond_idx_tuple],\n ndde20_idx=ndde20_idx_dict[cond_idx_tuple]))\n scores = reduce_score_dict_list(score_dict_list)\n return scores\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n adata_pred = ad.AnnData(pred.detach().cpu().numpy(),\n obs={'condition': [\"pred\"] * len(pred)})\n adata_true = ad.AnnData(true.detach().cpu().numpy(),\n obs={'condition': [\"stim\"] * len(true)})\n adata_ctrl = ad.AnnData(control.detach().cpu().numpy(),\n obs={'condition': [\"ctrl\"] * len(control)})\n adata = ad.concat([adata_true, adata_ctrl])\n if gene_names is not None:\n adata.var.index = gene_names\n adata_pred.var.index = gene_names\n sc.tl.rank_genes_groups(adata, groupby='condition', method=\"wilcoxon\")\n diff_genes = adata.uns[\"rank_genes_groups\"][\"names\"]['stim']\n diff_genes_idx = [np.where(np.array(gene_names) == x)[0].item() for x in diff_genes]\n adata = ad.concat([adata, adata_pred])\n adata.obs_names_make_unique()\n scores = reg_mean_plot(\n adata,\n condition_key='condition',\n axis_keys={\"x\": \"pred\", \"y\": 'stim', \"x1\": \"ctrl\"},\n gene_list=diff_genes[:10] if gene_names is not None else None,\n top_100_genes=diff_genes[:100],\n labels={\"x\": \"predicted\", \"y\": \"ground truth\", \"x1\": \"ctrl\"},\n path_to_save=path_to_save,\n title='scDiff',\n show=False,\n legend=False,\n )\n\n true_mean = true.mean(0)\n pred_mean = pred.mean(0)\n control_mean = control.mean(0)\n true_delta_mean = true_mean - control_mean\n pred_delta_mean = pred_mean - control_mean\n\n scores.update({\n # MAE\n 'mae': (pred_mean - true_mean).abs().mean().item(),\n 'mae_top_100': (pred_mean[diff_genes_idx[:100]] - true_mean[diff_genes_idx[:100]]).abs().mean().item(),\n 'mae_delta': (pred_delta_mean - true_delta_mean).abs().mean().item(),\n # MSE\n 'mse': F.mse_loss(pred_mean, true_mean).item(),\n 'mse_top_100': F.mse_loss(pred_mean[diff_genes_idx[:100]], true_mean[diff_genes_idx[:100]]).item(),\n 'mse_delta': F.mse_loss(pred_delta_mean, true_delta_mean).item(),\n # RMSE\n 'rmse': np.sqrt(F.mse_loss(pred_mean, true_mean).item()),\n 'rmse_top_100': np.sqrt(F.mse_loss(pred_mean[diff_genes_idx[:100]],\n true_mean[diff_genes_idx[:100]]).item()),\n 'rmse_delta': np.sqrt(F.mse_loss(pred_delta_mean, true_delta_mean).item()),\n # Correlation\n 'corr': PearsonCorr1d(pred_mean, true_mean).item(),\n 'corr_top_100': PearsonCorr1d(pred_mean[diff_genes_idx[:100]],\n true_mean[diff_genes_idx[:100]]).item(),\n 'corr_delta': PearsonCorr1d(pred_delta_mean, true_delta_mean).item(),\n # # Cosine similarity\n # 'cos': F.cosine_similarity(pred_mean.unsqueeze(0), true_mean.unsqueeze(0))[0].item(),\n # 'cos_top_100': F.cosine_similarity(pred_mean[diff_genes_idx[:100]].unsqueeze(0),\n # true_mean[diff_genes_idx[:100]].unsqueeze(0))[0].item(),\n # 'cos_delta': F.cosine_similarity(pred_delta_mean.unsqueeze(0),\n # true_delta_mean.unsqueeze(0))[0].item(),\n })\n\n if de_gene_idx is not None:\n for num_de in (20, 50, 100, 200):\n if num_de > len(de_gene_idx):\n warnings.warn(\n f\"Skipping {num_de} DE gene num eval since max num DE 
available is {len(de_gene_idx)}\",\n UserWarning,\n stacklevel=2,\n )\n continue\n if num_de > true.shape[1]:\n warnings.warn(\n f\"Skipping {num_de} DE gene num eval since max num genes available is {true.shape[1]}\",\n UserWarning,\n stacklevel=2,\n )\n continue\n\n idx = de_gene_idx[:num_de]\n scores.update(de_eval(pred_mean[idx], true_mean[idx], control_mean[idx], f\"de{num_de}\"))\n\n if ndde20_idx is not None:\n scores.update(de_eval(pred_mean[ndde20_idx], true_mean[ndde20_idx], control_mean[ndde20_idx], \"ndde20\"))\n\n return scores" }, { "identifier": "calculate_batch_r_squared", "path": "scdiff/evaluate.py", "snippet": "def calculate_batch_r_squared(pred, true, conditions):\n conditions = dict_of_arrays_to_tensor(conditions)\n unique_cond = conditions.unique(dim=0)\n r_squared_list = []\n for i in range(len(unique_cond)):\n cond_flag = torch.all((conditions == unique_cond[i]), dim=1)\n x = pred[cond_flag].mean(0).numpy()\n y = true[cond_flag].mean(0).numpy()\n _, _, r_value, _, _ = scipy.stats.linregress(x, y)\n r_squared_list.append(r_value ** 2)\n return r_squared_list" }, { "identifier": "LitEma", "path": "scdiff/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "BasicTransformerBlock", "path": "scdiff/modules/layers/attention.py", "snippet": "class BasicTransformerBlock(nn.Module):\n def __init__(\n self,\n dim: int,\n n_heads: int,\n d_head: int = 64,\n self_attn: bool = True,\n cross_attn: bool = False,\n ts_cross_attn: bool = False,\n final_act: Optional[nn.Module] = None,\n dropout: float = 0.,\n context_dim: Optional[int] = None,\n gated_ff: bool = True,\n checkpoint: bool = False,\n qkv_bias: bool = False,\n linear_attn: bool = False,\n ):\n super().__init__()\n assert self_attn or cross_attn, 'At least on attention layer'\n self.self_attn = self_attn\n self.cross_attn = cross_attn\n self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)\n if ts_cross_attn:\n raise NotImplementedError(\"Deprecated, please remove.\") # FIX: remove ts_cross_attn option\n # assert not (self_attn or linear_attn)\n # attn_cls = TokenSpecificCrossAttention\n else:\n assert not linear_attn, \"Performer attention not setup yet.\" # FIX: remove linear_attn option\n attn_cls = CrossAttention\n if self.cross_attn:\n self.attn1 = attn_cls(\n query_dim=dim,\n context_dim=context_dim,\n heads=n_heads,\n dim_head=d_head,\n dropout=dropout,\n qkv_bias=qkv_bias,\n ) # is self-attn if context is none\n if self.self_attn:\n self.attn2 = attn_cls(\n query_dim=dim,\n heads=n_heads,\n dim_head=d_head,\n dropout=dropout,\n qkv_bias=qkv_bias,\n ) # is a self-attention\n self.norm1 = nn.LayerNorm(dim)\n self.norm2 = nn.LayerNorm(dim)\n self.norm3 = nn.LayerNorm(dim)\n self.act = final_act\n self.checkpoint = checkpoint\n assert not self.checkpoint, 'Checkpointing not available yet' # FIX: remove checkpiont option\n\n @BatchedOperation(batch_dim=0, plain_num_dim=2)\n def forward(self, x, context=None, cross_mask=None, self_mask=None, **kwargs):\n if self.cross_attn:\n x = self.attn1(self.norm1(x), context=context, mask=cross_mask, **kwargs) + x\n if self.self_attn:\n x = self.attn2(self.norm2(x), mask=self_mask, **kwargs) + x\n x = self.ff(self.norm3(x)) + x\n if self.act is not None:\n x = self.act(x)\n return x" }, { "identifier": "FeedForward", "path": "scdiff/modules/layers/basic.py", "snippet": "class FeedForward(nn.Module):\n def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):\n super().__init__()\n inner_dim = int(dim * mult)\n dim_out = default(dim_out, dim)\n project_in = nn.Sequential(\n nn.Linear(dim, inner_dim),\n nn.GELU()\n ) if not glu else GEGLU(dim, inner_dim)\n\n self.net = nn.Sequential(\n project_in,\n nn.Dropout(dropout),\n nn.Linear(inner_dim, dim_out)\n )\n\n def forward(self, x):\n return self.net(x)" }, { "identifier": "EmbeddingDict", "path": "scdiff/modules/layers/scmodel.py", "snippet": "class EmbeddingDict(nn.Module):\n TEXT_EMB_DIR = './data/ontology_resources'\n\n def __init__(self, num_embed_dict, embedding_dim, depth, embedding_tokens=1,\n norm_layer=None, freeze=False, mask_ratio=0.0, text_emb=None,\n text_emb_file=None, freeze_text_emb=True, text_proj_type='linear',\n stackfnn_glu_flag=False, text_proj_hidden_dim=512, text_proj_act=None,\n text_proj_num_layers=2, text_proj_norm=None, text_proj_dropout=0.,\n gears_flag=False, gears_mode=\"single\", num_perts=None, gears_hidden_size=64,\n gears_mlp_layers=2, 
gears_norm=None, num_go_gnn_layers=1):\n super().__init__()\n size = embedding_dim * embedding_tokens\n n = embedding_tokens\n d = embedding_dim\n\n self.keys = sorted(num_embed_dict) # ensure consistent ordering\n self.mask_ratio = mask_ratio\n\n self.emb_dict = nn.ModuleDict()\n for key in self.keys:\n self.emb_dict[key] = nn.ModuleList([\n nn.Sequential(\n nn.Embedding(\n num_embed_dict[key],\n size,\n _freeze=freeze,\n ),\n create_norm(norm_layer, size),\n Rearrange('b (n d) -> b n d', n=n, d=d),\n )\n for _ in range(depth)\n ])\n\n if text_emb is not None or text_emb_file is not None:\n if text_emb is None:\n text_emb = torch.load(f'{self.TEXT_EMB_DIR}/{text_emb_file}')\n if text_proj_type == 'linear':\n text_proj = nn.Linear(text_emb.shape[1], size)\n elif text_proj_type == 'stackffn':\n text_proj = FeedForward(text_emb.shape[1], dim_out=size, mult=4, glu=stackfnn_glu_flag)\n elif text_proj_type == 'mlp':\n text_proj = MLPLayers(text_emb.shape[1], size, text_proj_hidden_dim, text_proj_num_layers,\n text_proj_dropout, text_proj_norm, text_proj_act)\n else:\n raise NotImplementedError(f\"Unsupported text_proj_type {text_proj_type}\")\n\n text_act = create_activation(text_proj_act)\n if text_proj_norm is None and norm_layer is not None:\n text_norm = create_norm(norm_layer, size)\n else:\n text_norm = create_norm(text_proj_norm, size)\n self.keys.append(\"text\")\n self.emb_dict['text'] = nn.ModuleList([\n nn.Sequential(\n nn.Embedding.from_pretrained(text_emb, freeze=freeze_text_emb),\n text_proj,\n text_norm,\n text_act,\n Rearrange('b (n d) -> b n d', n=n, d=d),\n )\n for _ in range(depth)\n ])\n\n if num_perts is not None and gears_flag:\n self.keys.append('pert')\n self.gears_mode = gears_mode\n gears_kwargs = dict(num_perts=num_perts, out_dim=size, mode=gears_mode,\n hidden_size=gears_hidden_size, mlp_layers=gears_mlp_layers)\n if gears_mode == \"single\":\n self.emb_dict['pert'] = nn.ModuleList([\n nn.Sequential(\n GEARS_Conditioner(num_go_gnn_layers=num_go_gnn_layers, **gears_kwargs),\n create_norm(gears_norm, size),\n Rearrange('b (n d) -> b n d', n=n, d=d),\n )\n for _ in range(depth)\n ])\n else:\n self.emb_dict['pert'] = nn.ModuleList([\n GEARS_Conditioner(num_go_gnn_layers=depth, **gears_kwargs),\n nn.ModuleList([create_norm(gears_norm, size) for _ in range(depth)]),\n Rearrange('b (n d) -> b n d', n=n, d=d),\n ])\n\n def __iter__(self):\n yield from self.keys\n\n def __getitem__(self, key):\n return self.emb_dict[key]\n\n def forward(self, input: Dict[str, torch.Tensor], aug_graph=None) -> List[torch.Tensor]:\n # Outer list: condition types; inner list: layer depth\n out = []\n for key in self.keys:\n if self.training:\n # NOTE: NULL condition token added during dataset init, and is\n # set to be the first token (index zero).\n mask = torch.rand_like(input[key].float()) < self.mask_ratio\n masked_input = input[key].long()\n if key != 'text' and key != \"pert\":\n masked_input[mask] = 0\n else:\n masked_input = input[key].long()\n\n if (\n isinstance(self[key][0], GEARS_Conditioner) # single\n or isinstance(self[key][0][0], GEARS_Conditioner) # parallel | sequential\n ):\n emb_list = []\n if self.gears_mode == \"single\":\n for emb in self[key]:\n gears_out = emb[0](masked_input, aug_graph)\n emb_list.append(emb[1:](gears_out))\n else:\n gears_out = self[key][0](masked_input, aug_graph)\n stack = zip(gears_out, self[key][1], repeat(self[key][2]))\n for emb, norm, rearrange in stack:\n emb_list.append(rearrange(norm(emb)))\n else:\n emb_list = [emb(masked_input) for emb in 
self[key]]\n\n out.append(emb_list)\n\n # Consolidate by concatenating along the token dimention in each layer\n out = [torch.cat(embs, dim=1) for embs in zip(*out)]\n\n return out" }, { "identifier": "MaskedEncoderConditioner", "path": "scdiff/utils/diffusion.py", "snippet": "class MaskedEncoderConditioner(nn.Module):\n \"\"\"Use 2-layer MLP to encoder available feature number.\n\n The encoded feature number condition is added to the cell embddings. If\n disabled, then directly return the original cell embeddings.\n\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n mult: int = 4,\n use_ratio: bool = False,\n use_se: bool = False,\n use_semlp: bool = False,\n concat: bool = False,\n disable: bool = False,\n ):\n super().__init__()\n assert not (use_ratio and use_se), \"Cannot set use_se and use_ratio together\"\n assert not (use_se and use_semlp), \"Cannot set use_se and use_semlp together\"\n assert not (use_se and concat), \"Cannot set use_se and concat together\"\n self.dim = dim\n self.use_ratio = use_ratio\n self.use_se = use_se or use_semlp\n self.concat = concat\n self.disable = disable\n if not disable:\n dim_in = dim if self.use_se else 1\n dim_in = dim_in + dim if concat else dim_in\n dim_hid = dim * mult\n\n self.proj = nn.Sequential(\n nn.Linear(dim_in, dim_hid),\n nn.SiLU(),\n nn.Linear(dim_hid, dim),\n ) if not use_se else nn.Identity()\n\n def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n if not self.disable and mask is not None:\n # Count the number of denoising input featues\n size = (mask.bool()).sum(1, keepdim=True).float()\n\n if self.use_ratio:\n h = size / x.shape[1]\n elif self.use_se:\n h = sinusoidal_embedding(size.ravel(), dim=self.dim, max_period=x.shape[1] + 1)\n else:\n h = size\n\n if self.concat:\n h = torch.cat((x, h), dim=-1)\n x = self.proj(h)\n else:\n h = self.proj(h)\n x = x + h\n\n return x" }, { "identifier": "timestep_embedding", "path": "scdiff/utils/diffusion.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n embedding = sinusoidal_embedding(timesteps, dim, max_period)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "make_beta_schedule", "path": "scdiff/utils/diffusion.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": 
"as_1d_vec", "path": "scdiff/utils/misc.py", "snippet": "def as_1d_vec(x: torch.Tensor) -> torch.Tensor:\n if len(x.shape) == 1:\n x = x.unsqueeze(-1)\n elif len(x.shape) == 1:\n raise ValueError(f\"input must be one or two dimensional tensor, got {x.shape}\")\n return x" }, { "identifier": "exists", "path": "scdiff/utils/misc.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "count_params", "path": "scdiff/utils/misc.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "scdiff/utils/misc.py", "snippet": "def instantiate_from_config(\n config: Union[Dict, DictConfig, str],\n _target_key: str = \"target\",\n _params_key: str = \"params\",\n _catch_conflict: bool = True,\n **extra_kwargs: Any,\n):\n # Check target specificiation and handel special conditions\n if _target_key not in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(f\"Expected key `{_target_key}` to instantiate.\")\n\n # Obtain target object and kwargs\n cls = get_obj_from_str(config[\"target\"])\n kwargs = config.get(_params_key, dict())\n\n # Check conflict and merge kwargs\n if (common_keys := sorted(set(kwargs) & set(extra_kwargs))):\n diff_keys = []\n for key in common_keys:\n if kwargs[key] != extra_kwargs[key]:\n diff_keys.append(key)\n\n if diff_keys and _catch_conflict:\n conflicting_config_kwargs = {i: kwargs[i] for i in diff_keys}\n conflicting_extra_kwargs = {i: extra_kwargs[i] for i in diff_keys}\n raise ValueError(\n \"Conflicting parameters between configs and those that are \"\n \"additionally specified. 
Please resolve or set _catch_conflict \"\n f\"to False to bypass this issue.\\n{conflicting_config_kwargs=}\\n\"\n f\"{conflicting_extra_kwargs=}\\n\",\n )\n kwargs = {**kwargs, **extra_kwargs}\n\n # Instantiate object and handel exception during instantiation\n try:\n return cls(**kwargs)\n except Exception as e:\n raise RuntimeError(f\"Failed to instantiate {cls!r} with kwargs:\\n{pformat(kwargs)}\") from e" }, { "identifier": "default", "path": "scdiff/utils/misc.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "create_activation", "path": "scdiff/utils/modules.py", "snippet": "def create_activation(name):\n if name is None:\n return nn.Identity()\n elif name == \"relu\":\n return nn.ReLU()\n elif name == \"gelu\":\n return nn.GELU()\n elif name == \"glu\":\n return nn.GLU()\n elif name == \"sigmoid\":\n return nn.Sigmoid()\n elif name == \"prelu\":\n return nn.PReLU()\n elif name == \"elu\":\n return nn.ELU()\n else:\n raise NotImplementedError(f\"{name} is not implemented.\")" }, { "identifier": "create_norm", "path": "scdiff/utils/modules.py", "snippet": "def create_norm(name, n, h=16):\n if name is None:\n return nn.Identity()\n elif name == \"layernorm\":\n return nn.LayerNorm(n)\n elif name == \"batchnorm\":\n return nn.BatchNorm1d(n)\n elif name == \"groupnorm\":\n return nn.GroupNorm(h, n)\n elif name.startswith(\"groupnorm\"):\n inferred_num_groups = int(name.repalce(\"groupnorm\", \"\"))\n return nn.GroupNorm(inferred_num_groups, n)\n else:\n raise NotImplementedError(f\"{name} is not implemented.\")" }, { "identifier": "extract_into_tensor", "path": "scdiff/utils/modules.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "init_weights", "path": "scdiff/utils/modules.py", "snippet": "def init_weights(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm1d)):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)" }, { "identifier": "mean_flat", "path": "scdiff/utils/modules.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "noise_like", "path": "scdiff/utils/modules.py", "snippet": "def noise_like(shape, device, repeat=False):\n if repeat:\n noise = torch.randn((1, *shape[1:]), device=device)\n repeat_noise = noise.repeat(shape[0], *((1,) * (len(shape) - 1)))\n return repeat_noise\n else:\n return torch.randn(shape, device=device)" } ]
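Among the scDiff context snippets, make_beta_schedule defines the diffusion noise schedule; its "linear" branch is a squared linspace between the square roots of linear_start and linear_end. A quick numeric check of that branch (the values below are typical-looking defaults chosen here for illustration):

import torch

n_timestep, linear_start, linear_end = 1000, 1e-4, 2e-2
betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
print(float(betas[0]), float(betas[-1]))  # ~1e-4 at t=0, rising monotonically to ~2e-2 at t=T-1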
import warnings import anndata as ad import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import pytorch_lightning as pl from contextlib import contextmanager from functools import partial from einops.layers.torch import Rearrange from scipy.sparse import csr_matrix from torch.optim.lr_scheduler import LambdaLR from tqdm import tqdm from scdiff.modules.diffusion_model import Decoder, Embedder, Encoder from scdiff.evaluate import ( denoising_eval, evaluate_annotation, perturbation_eval, calculate_batch_r_squared, ) from scdiff.modules.ema import LitEma from scdiff.modules.layers.attention import BasicTransformerBlock from scdiff.modules.layers.basic import FeedForward from scdiff.modules.layers.scmodel import EmbeddingDict from scdiff.utils.diffusion import MaskedEncoderConditioner, timestep_embedding from scdiff.utils.diffusion import make_beta_schedule from scdiff.utils.misc import as_1d_vec, exists, count_params, instantiate_from_config from scdiff.utils.misc import default from scdiff.utils.modules import create_activation, create_norm from scdiff.utils.modules import extract_into_tensor, init_weights, mean_flat, noise_like
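The import block above pulls in extract_into_tensor, which the diffusion model uses to pick per-timestep coefficients and broadcast them against a batch. A self-contained sketch of that behavior (the coefficient table, timesteps, and batch shape are invented for the example; the function body is copied from the context snippet):

import torch

def extract_into_tensor(a, t, x_shape):
    # copied from the scdiff/utils/modules.py snippet in the context list above
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

alphas_cumprod = torch.linspace(1.0, 0.01, 1000)        # illustrative per-timestep schedule
t = torch.tensor([0, 499, 999])                         # one timestep index per batch element
x = torch.randn(3, 2000)                                # e.g. a batch of 3 expression vectors
coef = extract_into_tensor(alphas_cumprod, t, x.shape)  # shape (3, 1), broadcasts against x
print(coef.shape, (coef * x).shape)                     # torch.Size([3, 1]) torch.Size([3, 2000])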
11,109
self.cell_mask_ratio = cell_mask_ratio self.feat_mask_ratio = feat_mask_ratio self.mask_context = mask_context self.mask_mode = mask_mode self.mask_strategy = mask_strategy self.mask_value = mask_value self.pad_value = pad_value self.decoder_mask = decoder_mask # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE encoder specifics activation = create_activation(activation) # self.in_dim = len(input_gene_list) if input_gene_list is not None else len(pretrained_gene_list) self.in_dim = len(pretrained_gene_list) if pretrained_gene_list is not None else len(input_gene_list) self.pretrained_gene_list = pretrained_gene_list self.input_gene_list = input_gene_list pretrained_gene_index = dict(zip(self.pretrained_gene_list, list(range(len(self.pretrained_gene_list))))) self.input_gene_idx = torch.tensor([ pretrained_gene_index[o] for o in self.input_gene_list if o in pretrained_gene_index ]).long() if self.input_gene_list is not None else None assert embed_dim == decoder_embed_dim # XXX: this seems to be required for MAE (see forward dec)? full_embed_dim = embed_dim * cond_tokens self.post_encoder_layer = Rearrange('b (n d) -> b n d', n=cond_tokens, d=embed_dim) self.embedder = Embedder(pretrained_gene_list, full_embed_dim, 'layernorm', dropout=dropout) self.encoder_type = encoder_type if encoder_type == 'attn': self.blocks = nn.ModuleList([ BasicTransformerBlock(full_embed_dim, num_heads, dim_head, self_attn=True, cross_attn=False, dropout=dropout, qkv_bias=True, final_act=activation) for _ in range(depth)]) elif encoder_type in ('mlp', 'mlpparallel'): self.blocks = nn.ModuleList([ nn.Sequential( nn.Linear(full_embed_dim, full_embed_dim), activation, create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type in ('stackffn', 'ffnparallel'): self.blocks = nn.ModuleList([ # FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout) nn.Sequential( FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout), create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type == 'none': self.blocks = None else: raise ValueError(f'Unknown encoder type {encoder_type}') # self.encoder_proj = nn.Linear(full_embed_dim, latent_dim) # self.norm = create_norm(norm_layer, full_embed_dim) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE decoder specifics self.subset_output = True self.decoder_embed_dim = decoder_embed_dim self.time_embed = nn.Sequential( nn.Linear(decoder_embed_dim, 4 * decoder_embed_dim), nn.SiLU(), nn.Linear(4 * decoder_embed_dim, decoder_embed_dim), ) if mlp_time_embed else nn.Identity() self.no_time_embed = no_time_embed self.cond_type = cond_type assert cond_strategy in ("full_mix", "pre_mix") self.cond_strategy = cond_strategy self.cond_emb_type = cond_emb_type self.cond_tokens = cond_tokens self.cond_cat_input = cond_cat_input if cond_dim is not None or cond_num_dict is not None: if cond_emb_type == 'linear': assert cond_dim is not None self.cond_embed = nn.Sequential( nn.Linear(cond_dim, decoder_embed_dim * cond_tokens), Rearrange('b (n d) -> b n d', n=cond_tokens, d=decoder_embed_dim), ) elif cond_emb_type == 'embedding': assert cond_num_dict is not None self.cond_embed = EmbeddingDict(cond_num_dict, decoder_embed_dim, depth, cond_tokens, mask_ratio=cond_mask_ratio, text_emb=text_emb, text_emb_file=text_emb_file, 
norm_layer=cond_emb_norm, freeze_text_emb=freeze_text_emb, text_proj_type=text_proj_type, text_proj_num_layers=text_proj_num_layers, stackfnn_glu_flag=stackfnn_glu_flag, text_proj_hidden_dim=text_proj_hidden_dim, text_proj_act=text_proj_act, text_proj_norm=text_proj_norm, # text_proj_dropout=dropout, G_go=G_go, # G_go_weight=G_go_weight, num_perts=num_perts, text_proj_dropout=dropout, gears_flag=gears_flag, num_perts=num_perts, gears_hidden_size=gears_hidden_size, gears_mode=gears_mode, gears_mlp_layers=gears_mlp_layers, gears_norm=gears_norm, num_go_gnn_layers=num_go_gnn_layers) elif cond_emb_type == 'none': self.cond_embed = None else: raise ValueError(f"Unknwon condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder
""" Wild mixture of: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8 Thank you! """ RESCALE_FACTOR = np.log(1e4) class DiffusionModel(nn.Module): def __init__(self, pretrained_gene_list, input_gene_list=None, dropout=0., cell_mask_ratio=0.75, mask_context=True, encoder_type='stackffn', embed_dim=1024, depth=4, dim_head=64, num_heads=4, feat_mask_ratio=0., decoder_embed_dim=512, decoder_embed_type='linear', decoder_num_heads=4, decoder_dim_head=64, cond_dim=None, cond_tokens=1, cond_type='crossattn', cond_strategy='full_mix', cond_emb_type='linear', cond_num_dict=None, cond_mask_ratio=0.5, cond_cat_input=False, post_cond_num_dict=None, post_cond_layers=2, post_cond_norm='layernorm', post_cond_mask_ratio=0.0, norm_layer='layernorm', mlp_time_embed=False, no_time_embed=False, activation='gelu', mask_strategy='random', mask_mode='v1', mask_dec_cond=False, mask_dec_cond_ratio=False, mask_dec_cond_se=False, mask_dec_cond_semlp=False, mask_dec_cond_concat=False, mask_value=0, pad_value=0, decoder_mask=None, text_emb=None, text_emb_file=None, freeze_text_emb=True, text_proj_type='linear', text_proj_act=None, stackfnn_glu_flag=False, text_proj_hidden_dim=512, text_proj_num_layers=2, text_proj_norm=None, cond_emb_norm=None, num_perts=None, gears_flag=False, gears_hidden_size=64, gears_mode="single", gears_mlp_layers=2, gears_norm=None, num_go_gnn_layers=1): super().__init__() self.depth = depth # -------------------------------------------------------------------------- # MAE masking options self.cell_mask_ratio = cell_mask_ratio self.feat_mask_ratio = feat_mask_ratio self.mask_context = mask_context self.mask_mode = mask_mode self.mask_strategy = mask_strategy self.mask_value = mask_value self.pad_value = pad_value self.decoder_mask = decoder_mask # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE encoder specifics activation = create_activation(activation) # self.in_dim = len(input_gene_list) if input_gene_list is not None else len(pretrained_gene_list) self.in_dim = len(pretrained_gene_list) if pretrained_gene_list is not None else len(input_gene_list) self.pretrained_gene_list = pretrained_gene_list self.input_gene_list = input_gene_list pretrained_gene_index = dict(zip(self.pretrained_gene_list, list(range(len(self.pretrained_gene_list))))) self.input_gene_idx = torch.tensor([ pretrained_gene_index[o] for o in self.input_gene_list if o in pretrained_gene_index ]).long() if self.input_gene_list is not None else None assert embed_dim == decoder_embed_dim # XXX: this seems to be required for MAE (see forward dec)? 
full_embed_dim = embed_dim * cond_tokens self.post_encoder_layer = Rearrange('b (n d) -> b n d', n=cond_tokens, d=embed_dim) self.embedder = Embedder(pretrained_gene_list, full_embed_dim, 'layernorm', dropout=dropout) self.encoder_type = encoder_type if encoder_type == 'attn': self.blocks = nn.ModuleList([ BasicTransformerBlock(full_embed_dim, num_heads, dim_head, self_attn=True, cross_attn=False, dropout=dropout, qkv_bias=True, final_act=activation) for _ in range(depth)]) elif encoder_type in ('mlp', 'mlpparallel'): self.blocks = nn.ModuleList([ nn.Sequential( nn.Linear(full_embed_dim, full_embed_dim), activation, create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type in ('stackffn', 'ffnparallel'): self.blocks = nn.ModuleList([ # FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout) nn.Sequential( FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout), create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type == 'none': self.blocks = None else: raise ValueError(f'Unknown encoder type {encoder_type}') # self.encoder_proj = nn.Linear(full_embed_dim, latent_dim) # self.norm = create_norm(norm_layer, full_embed_dim) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE decoder specifics self.subset_output = True self.decoder_embed_dim = decoder_embed_dim self.time_embed = nn.Sequential( nn.Linear(decoder_embed_dim, 4 * decoder_embed_dim), nn.SiLU(), nn.Linear(4 * decoder_embed_dim, decoder_embed_dim), ) if mlp_time_embed else nn.Identity() self.no_time_embed = no_time_embed self.cond_type = cond_type assert cond_strategy in ("full_mix", "pre_mix") self.cond_strategy = cond_strategy self.cond_emb_type = cond_emb_type self.cond_tokens = cond_tokens self.cond_cat_input = cond_cat_input if cond_dim is not None or cond_num_dict is not None: if cond_emb_type == 'linear': assert cond_dim is not None self.cond_embed = nn.Sequential( nn.Linear(cond_dim, decoder_embed_dim * cond_tokens), Rearrange('b (n d) -> b n d', n=cond_tokens, d=decoder_embed_dim), ) elif cond_emb_type == 'embedding': assert cond_num_dict is not None self.cond_embed = EmbeddingDict(cond_num_dict, decoder_embed_dim, depth, cond_tokens, mask_ratio=cond_mask_ratio, text_emb=text_emb, text_emb_file=text_emb_file, norm_layer=cond_emb_norm, freeze_text_emb=freeze_text_emb, text_proj_type=text_proj_type, text_proj_num_layers=text_proj_num_layers, stackfnn_glu_flag=stackfnn_glu_flag, text_proj_hidden_dim=text_proj_hidden_dim, text_proj_act=text_proj_act, text_proj_norm=text_proj_norm, # text_proj_dropout=dropout, G_go=G_go, # G_go_weight=G_go_weight, num_perts=num_perts, text_proj_dropout=dropout, gears_flag=gears_flag, num_perts=num_perts, gears_hidden_size=gears_hidden_size, gears_mode=gears_mode, gears_mlp_layers=gears_mlp_layers, gears_norm=gears_norm, num_go_gnn_layers=num_go_gnn_layers) elif cond_emb_type == 'none': self.cond_embed = None else: raise ValueError(f"Unknwon condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, 
decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder
self.mask_decoder_conditioner = MaskedEncoderConditioner(
11
2023-10-13 14:20:34+00:00
16k
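The record above stops exactly at the held-out continuation ("self.mask_decoder_conditioner = MaskedEncoderConditioner("), which suggests a next-line-completion setup: the import block plus the cropped code form the prompt, and the recorded line is the target. The sketch below is a minimal, hypothetical illustration of how such a record could be assembled and scored; the dictionary keys and helper names are my assumptions, not an official loader API, and the toy values are placeholders rather than text copied from the row.

from typing import Dict


def build_prompt(record: Dict[str, str]) -> str:
    """Concatenate the import block and the cropped code prefix into one prompt.

    The key names ("import_statement", "cropped_code", "next_line") are
    assumptions about how the flattened fields above would be exposed by a
    loader; adjust them to the actual schema you work with.
    """
    return record["import_statement"].rstrip() + "\n\n" + record["cropped_code"]


def next_line_exact_match(prediction: str, record: Dict[str, str]) -> bool:
    """Compare a model's predicted continuation with the recorded next line."""
    return prediction.strip() == record["next_line"].strip()


# Toy record with placeholder values (not copied from the row above).
record = {
    "import_statement": "import torch\nimport torch.nn as nn",
    "cropped_code": "class DiffusionModel(nn.Module):\n    def __init__(self):",
    "next_line": "        super().__init__()",
}
prompt = build_prompt(record)
print(next_line_exact_match("super().__init__()", record))  # True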
weavel-ai/promptmodel-python
promptmodel/chat_model.py
[ { "identifier": "DevClient", "path": "promptmodel/dev_app.py", "snippet": "class DevClient:\n \"\"\"DevClient main class\"\"\"\n\n def __init__(self):\n self.function_models: List[FunctionModelInterface] = []\n self.chat_models: List[ChatModelInterface] = []\n\n def register(self, func):\n instructions = list(dis.get_instructions(func))\n for idx in range(\n len(instructions) - 1\n ): # We check up to len-1 because we access idx+1 inside loop\n instruction = instructions[idx]\n # print(instruction)\n if instruction.opname in [\"LOAD_ATTR\", \"LOAD_METHOD\", \"LOAD_GLOBAL\"] and (\n instruction.argval == \"FunctionModel\"\n or instruction.argval == \"ChatModel\"\n ):\n next_instruction = instructions[idx + 1]\n\n # Check if the next instruction is LOAD_CONST with string value\n if next_instruction.opname == \"LOAD_CONST\" and isinstance(\n next_instruction.argval, str\n ):\n if instruction.argval == \"FunctionModel\":\n self.function_models.append(\n FunctionModelInterface(name=next_instruction.argval)\n )\n elif instruction.argval == \"ChatModel\":\n self.chat_models.append(\n ChatModelInterface(name=next_instruction.argval)\n )\n\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n def register_function_model(self, name):\n for function_model in self.function_models:\n if function_model.name == name:\n return\n\n self.function_models.append(FunctionModelInterface(name=name))\n\n def register_chat_model(self, name):\n for chat_model in self.chat_models:\n if chat_model.name == name:\n return\n\n self.chat_models.append(ChatModelInterface(name=name))\n\n def _get_function_model_name_list(self) -> List[str]:\n return [function_model.name for function_model in self.function_models]" }, { "identifier": "LLMProxy", "path": "promptmodel/llms/llm_proxy.py", "snippet": "class LLMProxy(LLM):\n def __init__(\n self,\n name: str,\n version: Optional[Union[str, int]] = \"deploy\",\n unit_config: Optional[UnitConfig] = None\n ):\n super().__init__()\n self._name = name\n self.version = version\n self.unit_config = unit_config\n\n def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = run_async_in_sync(\n LLMProxy.fetch_prompts(self._name, self.version)\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n log_uuid = str(uuid4())\n\n # Call the generator with the arguments\n stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)\n\n api_response = None\n dict_cache = {} # to store aggregated dictionary values\n string_cache = \"\" # to store aggregated string values\n error_occurs = False\n error_log = None\n for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n if item.parsed_outputs:\n dict_cache = update_dict(dict_cache, item.parsed_outputs)\n if item.raw_output:\n string_cache += item.raw_output\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": 
error_occurs,\n \"error_log\": error_log,\n }\n\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=api_response,\n parsed_outputs=dict_cache,\n metadata=metadata,\n )\n )\n\n return wrapper\n\n def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:\n async def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = await LLMProxy.fetch_prompts(\n self._name, self.version\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call async_gen with the arguments\n stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(\n **call_args\n )\n\n log_uuid = str(uuid4())\n\n api_response = None\n dict_cache = {} # to store aggregated dictionary values\n string_cache = \"\" # to store aggregated string values\n error_occurs = False\n error_log = None\n api_response: Optional[ModelResponse] = None\n async for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n if item.parsed_outputs:\n dict_cache = update_dict(dict_cache, item.parsed_outputs)\n if item.raw_output:\n string_cache += item.raw_output\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n # # add string_cache in model_response\n # if api_response:\n # if \"message\" not in api_response.choices[0]:\n # api_response.choices[0].message = {}\n # if \"content\" not in api_response.choices[0].message:\n # api_response.choices[0].message[\"content\"] = string_cache\n # api_response.choices[0].message[\"role\"] = \"assistant\"\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=api_response,\n parsed_outputs=dict_cache,\n metadata=metadata,\n )\n\n # raise Exception(\"error_log\")\n\n return wrapper\n\n def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = run_async_in_sync(\n LLMProxy.fetch_prompts(self._name, self.version)\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n log_uuid = str(uuid4())\n if llm_response.parsed_outputs:\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs=llm_response.parsed_outputs,\n metadata=metadata,\n )\n )\n else:\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs={},\n metadata=metadata,\n )\n )\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n 
llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return wrapper\n\n def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]:\n async def async_wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = await LLMProxy.fetch_prompts(\n self._name, self.version\n ) # messages, model, uuid = self._fetch_prompts()\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = await method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n log_uuid = str(uuid4())\n if llm_response.parsed_outputs:\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs=llm_response.parsed_outputs,\n metadata=metadata,\n )\n else:\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs={},\n metadata=metadata,\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return async_wrapper\n\n def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(session_uuid: str, **kwargs):\n instruction, version_details, message_logs = run_async_in_sync(\n LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n )\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n api_response = None\n if llm_response.api_response:\n api_response = llm_response.api_response\n\n log_uuid = str(uuid4())\n\n run_async_in_sync(\n self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n message=llm_response.api_response.choices[\n 0\n ].message.model_dump(),\n uuid=log_uuid,\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return wrapper\n\n def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:\n async def async_wrapper(session_uuid: str, **kwargs):\n (\n instruction,\n version_details,\n message_logs,\n ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, 
self.version)\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = await method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n api_response = None\n if llm_response.api_response:\n api_response = llm_response.api_response\n\n log_uuid = str(uuid4())\n await self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=llm_response.api_response.choices[\n 0\n ].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return async_wrapper\n\n def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(session_uuid: str, **kwargs):\n instruction, version_details, message_logs = run_async_in_sync(\n LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n )\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n # Call the generator with the arguments\n stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)\n\n api_response = None\n error_occurs = False\n error_log = None\n log_uuid = str(uuid4())\n for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n run_async_in_sync(\n self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=api_response.choices[0].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n )\n\n return wrapper\n\n def _wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:\n async def wrapper(session_uuid: str, **kwargs):\n (\n instruction,\n version_details,\n message_logs,\n ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n # Call the generator with the arguments\n stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(\n **call_args\n )\n\n api_response = None\n error_occurs = False\n error_log = None\n log_uuid = str(uuid4())\n async for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the 
last api_response, not delta response\n api_response = item.api_response\n\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n await self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=api_response.choices[0].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n\n return wrapper\n\n def _prepare_call_args(\n self,\n prompts: List[Dict[str, str]],\n version_detail: Dict[str, Any],\n inputs: Dict[str, Any],\n kwargs,\n ):\n stringified_inputs = {key: str(value) for key, value in inputs.items()}\n messages = [\n {\n \"content\": prompt[\"content\"].format(**stringified_inputs),\n \"role\": prompt[\"role\"],\n }\n for prompt in prompts\n ]\n call_args = {\n \"messages\": messages,\n \"model\": version_detail[\"model\"] if version_detail else None,\n \"parsing_type\": version_detail[\"parsing_type\"] if version_detail else None,\n \"output_keys\": version_detail[\"output_keys\"] if version_detail else None,\n }\n if call_args[\"parsing_type\"] is None:\n del call_args[\"parsing_type\"]\n del call_args[\"output_keys\"]\n\n if \"functions\" in kwargs:\n call_args[\"functions\"] = kwargs[\"functions\"]\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n\n if \"api_key\" in kwargs:\n call_args[\"api_key\"] = kwargs[\"api_key\"]\n return call_args\n\n def _prepare_call_args_for_chat(\n self,\n messages: List[Dict[str, Any]],\n version_detail: Dict[str, Any],\n kwargs,\n ):\n call_args = {}\n token_per_tools = 0\n if \"functions\" in kwargs:\n call_args[\"functions\"] = kwargs[\"functions\"]\n token_per_tools = num_tokens_from_functions_input(\n functions=kwargs[\"functions\"],\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\",\n )\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n token_per_tools = num_tokens_from_functions_input(\n functions=kwargs[\"tools\"],\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\",\n )\n\n # truncate messages to make length <= model's max length\n model_max_tokens = get_max_tokens(\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\"\n )\n token_per_messages = num_tokens_for_messages_for_each(\n messages, version_detail[\"model\"]\n )\n token_limit_exceeded = (\n sum(token_per_messages) + token_per_tools\n ) - model_max_tokens\n if token_limit_exceeded > 0:\n while token_limit_exceeded > 0:\n # erase the second oldest message (first one is system prompt, so it should not be erased)\n if len(messages) == 1:\n # if there is only one message, Error cannot be solved. 
Just call LLM and get error response\n break\n token_limit_exceeded -= token_per_messages[1]\n del messages[1]\n del token_per_messages[1]\n\n call_args[\"messages\"] = messages\n call_args[\"model\"] = version_detail[\"model\"] if version_detail else None\n\n if \"api_key\" in kwargs:\n call_args[\"api_key\"] = kwargs[\"api_key\"]\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n\n return call_args\n\n async def _async_log_to_cloud(\n self,\n version_uuid: str,\n log_uuid: str,\n inputs: Optional[Dict] = None,\n api_response: Optional[ModelResponse] = None,\n parsed_outputs: Optional[Dict] = None,\n metadata: Optional[Dict] = None,\n ):\n config = read_config()\n if (\n \"project\" in config\n and \"mask_inputs\" in config[\"project\"]\n and config[\"project\"][\"mask_inputs\"] == True\n ):\n inputs = {key: \"PRIVATE LOGGING\" for key, value in inputs.items()}\n\n # Perform the logging asynchronously\n if api_response:\n api_response_dict = api_response.model_dump()\n api_response_dict[\"response_ms\"] = api_response._response_ms\n api_response_dict[\"_response_ms\"] = api_response._response_ms\n else:\n api_response_dict = None\n run_log_request_body = {\n \"uuid\": log_uuid,\n \"api_response\": api_response_dict,\n \"inputs\": inputs,\n \"parsed_outputs\": parsed_outputs,\n \"metadata\": metadata,\n }\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/run_log\",\n params={\n \"version_uuid\": version_uuid,\n },\n json=run_log_request_body,\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to log to cloud: {res.json()}[/red]\");\n \n if self.unit_config:\n res_connect = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/unit/connect\",\n json={\n \"unit_log_uuid\": self.unit_config.log_uuid,\n \"run_log_uuid\": log_uuid, \n },\n use_cli_key=False,\n )\n if res_connect.status_code != 200:\n print(f\"[red]Failed to connect prompt component to run log: {res_connect.json()}[/red]\")\n\n return res\n\n async def _async_chat_log_to_cloud(\n self,\n session_uuid: str,\n version_uuid: Optional[str] = None,\n chat_log_request_list: List[ChatLogRequest] = [],\n ):\n # Perform the logging asynchronously\n\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/chat_log\",\n params={\n \"session_uuid\": session_uuid,\n \"version_uuid\": version_uuid,\n },\n json=[r.model_dump() for r in chat_log_request_list],\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to log to cloud: {res.json()}[/red]\")\n return res\n\n async def _async_make_session_cloud(\n self,\n session_uuid: str,\n version_uuid: Optional[str] = None,\n ):\n # Perform the logging asynchronously\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/make_session\",\n params={\n \"session_uuid\": session_uuid,\n \"version_uuid\": version_uuid,\n },\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to make ChatSession in cloud: {res.json()}[/red]\")\n return res\n\n def make_kwargs(self, **kwargs):\n res = {}\n for key, value in kwargs.items():\n if value is not None:\n res[key] = value\n return res\n\n def run(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_method(super().run)(inputs, **kwargs)\n\n def arun(\n self,\n inputs: Dict[str, Any] = {},\n functions: 
Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_method(super().arun)(inputs, **kwargs)\n\n def stream(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_gen(super().stream)(inputs, **kwargs)\n\n def astream(\n self,\n inputs: Optional[Dict[str, Any]] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_gen(super().astream)(inputs, **kwargs)\n\n def run_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_method(super().run_and_parse)(inputs, **kwargs)\n\n def arun_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs)\n\n def stream_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs)\n\n def astream_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs)\n\n def chat_run(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_chat(super().run)(session_uuid, **kwargs)\n\n def chat_arun(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_chat(super().arun)(session_uuid, **kwargs)\n\n def chat_stream(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs)\n\n def chat_astream(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = 
None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs)\n\n @staticmethod\n async def fetch_prompts(\n name,\n version: Optional[Union[str, int]] = \"deploy\",\n ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]:\n \"\"\"fetch prompts.\n\n Args:\n name (str): name of FunctionModel\n\n Returns:\n Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail)\n \"\"\"\n # Check connection activate\n config = read_config()\n if (\n \"connection\" in config\n and \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"] == True\n ):\n return [], {}\n elif (\n \"connection\" in config\n and \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"] == True\n ):\n return [], {}\n else:\n if (\n \"project\" in config\n and \"use_cache\" in config[\"project\"]\n and config[\"project\"][\"use_cache\"] == True\n and version == \"deploy\"\n ):\n cache_manager = CacheManager()\n # call update_local API in background task\n cache_update_thread = Thread(\n target=cache_manager.cache_update_background_task, args=(config,)\n )\n cache_update_thread.daemon = True\n cache_update_thread.start()\n\n # get prompt from local DB by ratio\n prompt_rows, version_detail = get_deployed_prompts(name)\n if prompt_rows is None:\n return [], {}\n\n return [\n {\"role\": prompt.role, \"content\": prompt.content}\n for prompt in prompt_rows\n ], version_detail\n\n else:\n try:\n config_list = await AsyncAPIClient.execute(\n method=\"GET\",\n path=\"/function_model_versions\",\n params={\"function_model_name\": name, \"version\": version},\n use_cli_key=False,\n )\n config_list = config_list.json()\n except Exception as e:\n raise e\n\n function_model_versions = [\n x[\"function_model_version\"] for x in config_list\n ]\n\n if version == \"deploy\":\n for version in function_model_versions:\n if version[\"is_published\"] is True:\n version[\"ratio\"] = 1.0\n selected_version = select_version_by_ratio(function_model_versions)\n else:\n selected_version = function_model_versions[0]\n\n # config.prompts where config.function_model_version.uuid = selected_version.uuid\n prompt_rows = [\n config[\"prompts\"]\n for config in config_list\n if config[\"function_model_version\"][\"uuid\"]\n == selected_version[\"uuid\"]\n ][0]\n\n # sort prompt_rows by step\n prompt_rows = sorted(prompt_rows, key=lambda prompt: prompt[\"step\"])\n\n version_detail = {\n \"model\": selected_version[\"model\"],\n \"version\": selected_version[\"version\"],\n \"uuid\": selected_version[\"uuid\"],\n \"parsing_type\": selected_version[\"parsing_type\"],\n \"output_keys\": selected_version[\"output_keys\"],\n }\n\n if prompt_rows is None:\n return [], {}\n\n return [\n {\"role\": prompt[\"role\"], \"content\": prompt[\"content\"]}\n for prompt in prompt_rows\n ], version_detail\n\n @staticmethod\n async def fetch_chat_model(\n name: str,\n session_uuid: Optional[str] = None,\n version: Optional[Union[str, int]] = \"deploy\",\n ) -> Tuple[str, Dict[str, Any], List[Dict]]:\n \"\"\"fetch instruction and version detail\n\n Args:\n name (str): name of ChatModel\n\n Returns:\n Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail)\n \"\"\"\n # Check connection activate\n config = read_config()\n if (\n \"connection\" in config\n and \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"] == True\n ):\n 
return \"\", {}, []\n elif (\n \"connection\" in config\n and \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"] == True\n ):\n return \"\", {}, []\n else:\n try:\n res_data = await AsyncAPIClient.execute(\n method=\"GET\",\n path=\"/chat_model_versions_with_logs\",\n params={\n \"chat_model_name\": name,\n \"session_uuid\": session_uuid,\n \"version\": version,\n },\n use_cli_key=False,\n )\n res_data = res_data.json()\n except Exception as e:\n raise e\n chat_model_versions = res_data[\"chat_model_versions\"]\n\n if (\n session_uuid is None\n ): # if this is the initial call for deployed chat model\n if version == \"deploy\":\n for version in chat_model_versions:\n if version[\"is_published\"] is True:\n version[\"ratio\"] = 1.0\n selected_version = select_version_by_ratio(chat_model_versions)\n else:\n selected_version = chat_model_versions[0]\n else:\n selected_version = chat_model_versions[0]\n\n instruction: str = selected_version[\"system_prompt\"]\n\n version_detail = {\n \"model\": selected_version[\"model\"],\n \"uuid\": selected_version[\"uuid\"],\n \"version\": selected_version[\"version\"],\n }\n if session_uuid:\n chat_logs: List[Dict] = res_data[\"chat_logs\"]\n chat_logs = [{\"role\": \"system\", \"content\": instruction}] + chat_logs\n else:\n chat_logs = []\n\n # delete columns which value is None in each chat log\n for chat_log in chat_logs:\n for key in list(chat_log.keys()):\n if chat_log[key] is None:\n del chat_log[key]\n\n return instruction, version_detail, chat_logs\n\n # @staticmethod\n # async def fetch_chat_log(\n # session_uuid: str,\n # version: Optional[Union[str, int]] = \"deploy\",\n # ) -> List[Dict[str, Any]]:\n # \"\"\"fetch conversation log for session_uuid and version detail\n\n # Args:\n # session_uuid (str): session_uuid\n\n # Returns:\n # List[Dict[str, Any]] : list of conversation log\n # \"\"\"\n # config = read_config()\n # if \"connection\" in config and config[\"connection\"][\"initializing\"] == True:\n # return []\n # elif \"connection\" in config and config[\"connection\"][\"reloading\"] == True:\n # return []\n # else:\n # try:\n # res_data = await AsyncAPIClient.execute(\n # method=\"GET\",\n # path=\"/fetch_chat_logs\",\n # params={\"session_uuid\": session_uuid},\n # use_cli_key=False,\n # )\n # res_data = res_data.json()\n # except Exception as e:\n # raise e\n\n # # filter out unnecessary data\n # res_data = [\n # {\n # \"role\": message[\"role\"],\n # \"content\": message[\"content\"],\n # \"function_call\": message[\"function_call\"],\n # }\n # for message in res_data[\"chat_logs\"]\n # ]\n # return res_data" }, { "identifier": "logger", "path": "promptmodel/utils/logger.py", "snippet": "def debug(msg: Any, *args):\ndef success(msg: Any, *args):\ndef info(msg: Any, *args):\ndef warning(msg: Any, *args):\ndef error(msg: Any, *args):" }, { "identifier": "read_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def read_config():\n \"\"\"\n Reads the configuration from the given filename.\n\n :return: A dictionary containing the configuration.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n\n with open(CONFIG_FILE, \"r\") as file:\n config = yaml.safe_load(file) or {}\n return config" }, { "identifier": "upsert_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def upsert_config(new_config: Dict[str, Any], section: str = None):\n \"\"\"\n Upserts the given configuration file with the given configuration.\n\n :param new_config: A dictionary containing the new 
configuration.\n :param section: The section of the configuration to update.\n \"\"\"\n config = read_config()\n if section:\n config_section = config.get(section, {})\n new_config = {section: merge_dict(config_section, new_config)}\n config = merge_dict(config, new_config)\n # If . directory does not exist, create it\n if not os.path.exists(\"./.promptmodel\"):\n os.mkdir(\"./.promptmodel\")\n\n with open(CONFIG_FILE, \"w\") as file:\n yaml.safe_dump(config, file, default_flow_style=False)" }, { "identifier": "check_connection_status_decorator", "path": "promptmodel/utils/config_utils.py", "snippet": "def check_connection_status_decorator(method):\n if asyncio.iscoroutinefunction(method):\n\n @wraps(method)\n async def async_wrapper(self, *args, **kwargs):\n config = read_config()\n if \"connection\" in config and (\n (\n \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"]\n )\n or (\n \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"]\n )\n ):\n return\n else:\n if \"config\" not in kwargs:\n kwargs[\"config\"] = config\n return await method(self, *args, **kwargs)\n\n # async_wrapper.__name__ = method.__name__\n # async_wrapper.__doc__ = method.__doc__\n return async_wrapper\n else:\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n config = read_config()\n if \"connection\" in config and (\n (\n \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"]\n )\n or (\n \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"]\n )\n ):\n return\n else:\n return method(self, *args, **kwargs)\n\n # wrapper.__name__ = method.__name__\n # wrapper.__doc__ = method.__doc__\n return wrapper" }, { "identifier": "run_async_in_sync", "path": "promptmodel/utils/async_utils.py", "snippet": "def run_async_in_sync(coro: Coroutine):\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError: # No running loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n result = loop.run_until_complete(coro)\n # loop.close()\n return result\n\n return loop.run_until_complete(coro)" }, { "identifier": "LLMStreamResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMStreamResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[ChoiceDeltaFunctionCall] = None\n tool_calls: Optional[List[ChoiceDeltaToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "LLMResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[FunctionCall] = None\n tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "ChatModelConfig", "path": "promptmodel/types/response.py", "snippet": "class ChatModelConfig(BaseModel):\n system_prompt: str\n model: str\n name: str\n version_uuid: str\n version: int\n message_logs: Optional[List[Dict]] = []" }, { "identifier": "InstanceType", "path": "promptmodel/types/enums.py", "snippet": "class InstanceType(str, Enum):\n ChatLog = \"ChatLog\"\n RunLog = \"RunLog\"\n ChatLogSession = \"ChatLogSession\"" 
}, { "identifier": "ChatLogRequest", "path": "promptmodel/types/request.py", "snippet": "class ChatLogRequest(BaseModel):\n uuid: Optional[str] = None\n message: Dict[str, Any]\n metadata: Optional[Dict] = None\n api_response: Optional[ModelResponse] = None\n\n def __post_init__(\n self,\n ):\n if self.api_response is not None and self.message is None:\n self.message = self.api_response.choices[0].message.model_dump()" }, { "identifier": "AsyncAPIClient", "path": "promptmodel/apis/base.py", "snippet": "class AsyncAPIClient:\n \"\"\"\n A class to represent an Async API request client.\n Used in Deployment stage.\n\n ...\n\n Methods\n -------\n get_headers():\n Generates headers for the API request.\n execute(method=\"GET\", params=None, data=None, json=None, **kwargs):\n Executes the API request.\n \"\"\"\n\n @classmethod\n async def _get_headers(cls, use_cli_key: bool = True) -> Dict:\n \"\"\"\n Reads, decrypts the api_key, and returns headers for API request.\n\n Returns\n -------\n dict\n a dictionary containing the Authorization header\n \"\"\"\n config = read_config()\n if use_cli_key:\n if \"connection\" not in config:\n print(\n \"User not logged in. Please run [violet]prompt login[/violet] first.\"\n )\n exit()\n\n encrypted_key = config[\"connection\"][\"encrypted_api_key\"]\n if encrypted_key is None:\n raise Exception(\"No API key found. Please run 'prompt login' first.\")\n decrypted_key = decrypt_message(encrypted_key)\n else:\n decrypted_key = os.environ.get(\"PROMPTMODEL_API_KEY\")\n if decrypted_key is None:\n raise Exception(\n \"PROMPTMODEL_API_KEY was not found in the current environment.\"\n )\n headers = {\"Authorization\": f\"Bearer {decrypted_key}\"}\n return headers\n\n @classmethod\n async def execute(\n cls,\n path: str,\n method=\"GET\",\n params: Dict = None,\n data: Dict = None,\n json: Dict = None,\n ignore_auth_error: bool = False,\n use_cli_key: bool = True,\n **kwargs,\n ) -> requests.Response:\n \"\"\"\n Executes the API request with the decrypted API key in the headers.\n\n Parameters\n ----------\n method : str, optional\n The HTTP method of the request (default is \"GET\")\n params : dict, optional\n The URL parameters to be sent with the request\n data : dict, optional\n The request body to be sent with the request\n json : dict, optional\n The JSON-encoded request body to be sent with the request\n ignore_auth_error: bool, optional\n Whether to ignore authentication errors (default is False)\n **kwargs : dict\n Additional arguments to pass to the requests.request function\n\n Returns\n -------\n requests.Response\n The response object returned by the requests library\n \"\"\"\n url = f\"{ENDPOINT_URL}{path}\"\n headers = await cls._get_headers(use_cli_key)\n try:\n async with httpx.AsyncClient(http2=True) as _client:\n response = await _client.request(\n method,\n url,\n headers=headers,\n params=params,\n data=data,\n json=json,\n **kwargs,\n )\n if not response:\n print(f\"[red]Error: {response}[/red]\")\n if response.status_code == 200:\n return response\n elif response.status_code == 403:\n if not ignore_auth_error:\n print(\"[red]Authentication failed.[/red]\")\n else:\n print(f\"[red]Error: {response}[/red]\")\n\n return response\n except requests.exceptions.ConnectionError:\n print(\"[red]Could not connect to the Promptmodel API.[/red]\")\n except requests.exceptions.Timeout:\n print(\"[red]The request timed out.[/red]\")\n except Exception as exception:\n print(f\"[red]Error: {exception}[/red]\")" } ]
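Several helpers quoted verbatim in the context list above (for example run_async_in_sync) are small enough to demonstrate in isolation. The snippet below is a minimal usage sketch, assuming the helper behaves as its quoted implementation shows; fetch_greeting is a hypothetical stand-in coroutine for an async call such as a prompt fetch.

import asyncio

from promptmodel.utils.async_utils import run_async_in_sync


async def fetch_greeting(name: str) -> str:
    # Hypothetical coroutine standing in for an async fetch (e.g. fetching prompts).
    await asyncio.sleep(0)
    return f"hello, {name}"


# With no event loop running, the helper creates one and blocks until the
# coroutine finishes, returning its result to plain synchronous code.
print(run_async_in_sync(fetch_greeting("promptmodel")))  # hello, promptmodel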
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Coroutine, Union
from uuid import uuid4

from litellm import ModelResponse

from promptmodel import DevClient
from promptmodel.llms.llm_proxy import LLMProxy
from promptmodel.utils import logger
from promptmodel.utils.config_utils import (
    read_config,
    upsert_config,
    check_connection_status_decorator,
)
from promptmodel.utils.async_utils import run_async_in_sync
from promptmodel.types.response import LLMStreamResponse, LLMResponse, ChatModelConfig
from promptmodel.types.enums import InstanceType
from promptmodel.types.request import ChatLogRequest
from promptmodel.apis.base import AsyncAPIClient
import sys
11,515
from __future__ import annotations


class RegisteringMeta(type):
    def __call__(cls, *args, **kwargs):
        instance: ChatModel = super().__call__(*args, **kwargs)
        # Find the global client instance in the current context
        client = cls.find_client_instance()
        if client is not None:
            client.register_chat_model(instance.name)
        return instance

    @staticmethod
    def find_client_instance():
        # Get the current frame
        frame = sys._getframe(2)
        # Get global variables in the current frame
        global_vars = frame.f_globals
        # Find an instance of Client among global variables
        for var_name, var_val in global_vars.items():
from __future__ import annotations


class RegisteringMeta(type):
    def __call__(cls, *args, **kwargs):
        instance: ChatModel = super().__call__(*args, **kwargs)
        # Find the global client instance in the current context
        client = cls.find_client_instance()
        if client is not None:
            client.register_chat_model(instance.name)
        return instance

    @staticmethod
    def find_client_instance():
        # Get the current frame
        frame = sys._getframe(2)
        # Get global variables in the current frame
        global_vars = frame.f_globals
        # Find an instance of Client among global variables
        for var_name, var_val in global_vars.items():
if isinstance(var_val, DevClient):
0
2023-10-09 03:35:44+00:00
16k
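The cropped code in this record ends one line before the recorded continuation (if isinstance(var_val, DevClient):), i.e. just as the metaclass scans the caller's globals for a client to register with. The toy reconstruction below illustrates that frame-inspection registration pattern end to end; _ToyClient and ToyChatModel are hypothetical stand-ins for demonstration only, not the library's classes.

import sys


class _ToyClient:
    """Hypothetical stand-in for a dev client; it only records registered names."""

    def __init__(self):
        self.registered = []

    def register_chat_model(self, name: str):
        self.registered.append(name)


class _RegisteringMeta(type):
    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)
        # Inspect the caller's module globals (one frame up from this __call__),
        # mirroring the sys._getframe(...) lookup shown in the row above.
        caller_globals = sys._getframe(1).f_globals
        for value in caller_globals.values():
            if isinstance(value, _ToyClient):
                value.register_chat_model(instance.name)
                break
        return instance


class ToyChatModel(metaclass=_RegisteringMeta):
    def __init__(self, name: str):
        self.name = name


client = _ToyClient()
ToyChatModel("summarizer")
print(client.registered)  # ['summarizer']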
cambridgeltl/ClaPS
algs/genetics.py
[ { "identifier": "BaseTrainer", "path": "algs/base_trainer.py", "snippet": "class BaseTrainer(abc.ABC):\n \"\"\"\n The base trainer class.\n\n Attributes:\n obj_func: the callable function handle for model interfacing.\n logger: an optional logger object.\n bn_calibrator: a batch norm calibration object. Only used in\n testing (not training or validation).\n \"\"\"\n\n def __init__(\n self,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Optional[Any] = None,\n use_bn_calibrator: bool = False,\n n_samples_bn_calibrator: int = 128,\n ):\n self.obj_func = obj_func\n self.logger = logger\n self.prompt_dataset = prompt_dataset\n\n self.bn_calibrator = BatchNormCalibrate() if use_bn_calibrator else None\n self.n_samples_bn_calibrator = n_samples_bn_calibrator\n\n @abc.abstractmethod\n def train(self, train_data: Iterable[Any]):\n raise NotImplementedError()\n\n def validate(self, val_dataset: Iterable[Any], best_str_list: List[str]) -> str:\n t_dataset = val_dataset\n if self.logger is not None:\n self.logger.info(\"total val dataset length: %s\", len(t_dataset))\n val_acc_list = []\n\n for prompt in best_str_list:\n n_correct = 0\n\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size,\n (batch_idx + 1) * self.eval_batch_size,\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n _, _, batch_acc = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n prompt,\n True,\n \"infer\",\n verbose=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", prompt)\n self.logger.info(\"final val acc: %s\", (n_correct / len(t_dataset)))\n val_acc_list.append(float(n_correct / len(t_dataset)))\n # best_prompt = best_str_list[np.argmax(val_acc_list)]\n max_acc = np.max(val_acc_list)\n indices = np.argwhere(val_acc_list == max_acc)\n last_index = indices[-1][0]\n best_prompt = best_str_list[last_index]\n if self.logger is not None:\n self.logger.info(\"val acc list: %s\", val_acc_list)\n self.logger.info(\"best prompt: %s\", best_prompt)\n self.logger.info(\"best prompt acc: %s\", np.max(val_acc_list))\n\n return best_prompt\n\n def test(\n self,\n test_dataset,\n best_prompt,\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n best_prompt,\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through 
the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n _,\n _,\n _,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n best_prompt,\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", best_prompt)\n self.logger.info(n_correct)\n self.logger.info(\"final test acc: %s\", (n_correct / len(t_dataset)))\n if return_logits:\n return n_correct / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return n_correct / len(t_dataset), None\n\n def manual(\n self,\n test_dataset: Iterable[Any],\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n for i in range(self.n_classes):\n test_I = [x for x in t_dataset if x[\"label\"] == i]\n if self.logger is not None:\n self.logger.info(\n \"total test dataset length: %s for class %s\", len(test_I), i\n )\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n sum_ece = 0\n sum_entropy = 0\n class_correct = collections.Counter((i, 0) for i in range(self.n_classes))\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n \"\",\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = 
self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n count_class,\n batch_ece,\n batch_entropy,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n \"\",\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n sum_ece += batch_ece * len(idx)\n sum_entropy += batch_entropy * len(idx)\n class_correct += count_class[0]\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n # print(count_class)\n torch.cuda.empty_cache()\n # print(class_correct)\n if self.logger is not None:\n self.logger.info(\n \"manual prompt test acc: %s\", (float(n_correct) / len(t_dataset))\n )\n self.logger.info(\"count class: %s\", class_correct)\n self.logger.info(\n \"manual prompt test ece percent: %s\",\n (float(sum_ece) / len(t_dataset) * 100),\n )\n self.logger.info(\n \"manual prompt test entropy: %s\", (float(sum_entropy) / len(t_dataset))\n )\n if return_logits:\n return float(n_correct) / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return float(n_correct) / len(t_dataset), None" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in 
val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = 
['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. Whether the LM is masked, or left-to-right.\n compute_zscore: bool. 
Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. 
In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. 
n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" } ]
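The snippets in this record route prediction logits through a bn_calibrator before scoring (the calibrator is trained on calibration logits with flush=True, switched to eval(), and then applied at inference). The BatchNormCalibrate class itself is not included here, so the following is only a minimal sketch of that idea under the assumption that the calibrator standardises logits with statistics gathered during a calibration pass; the class name and every method in it are hypothetical, not the repository's API.

import torch

class ToyLogitCalibrator:
    """Hypothetical stand-in for the bn_calibrator used in the snippets above.

    observe() records mean/std of the class logits seen during calibration;
    after eval() the calibrator standardises new logits with those statistics.
    """

    def __init__(self):
        self.mean = None
        self.std = None
        self.frozen = False

    def observe(self, logits: torch.Tensor) -> None:
        # logits: [num_calibration_samples, num_classes]
        self.mean = logits.mean(dim=0)
        self.std = logits.std(dim=0).clamp_min(1e-6)

    def eval(self) -> None:
        self.frozen = True

    def __call__(self, logits: torch.Tensor) -> torch.Tensor:
        if not self.frozen or self.mean is None:
            return logits
        return (logits - self.mean) / self.std

if __name__ == "__main__":
    calib = ToyLogitCalibrator()
    calib.observe(torch.randn(128, 3) * 2.0 + 1.0)  # calibration pass
    calib.eval()                                    # freeze statistics
    print(calib(torch.randn(4, 3)))                 # standardised test logits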
import random
import numpy as np
from typing import Any
from .base_trainer import BaseTrainer
from utils.fsc_datasets import PromptedClassificationDataset
from rewards.text_classification_reward import PromptedClassificationReward
11341
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop
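To make the quoted operators concrete, here is a small usage sketch. The WhitespaceTokenizer stub, the vocabulary, and the prompt strings are invented for illustration (the repository presumably passes a real subword tokenizer), and the sketch assumes the Genetics class shown above is available in scope.

import random

class WhitespaceTokenizer:
    """Toy tokenizer exposing the two methods Genetics relies on.

    encode() maps whitespace-separated words to integer ids and
    decode() maps ids back to a string; a real run would use e.g.
    a transformers tokenizer instead.
    """

    def __init__(self, vocab):
        self.vocab = list(vocab)

    def encode(self, text):
        return [self.vocab.index(w) for w in text.split()]

    def decode(self, ids, skip_special_tokens=True):
        return " ".join(self.vocab[i] for i in ids)

vocab = ["classify", "the", "sentiment", "of", "this", "review", "please", "carefully"]
tok = WhitespaceTokenizer(vocab)
genetics = Genetics(crossover_tokenizer=tok, vocab_id=list(range(len(vocab))))

parent_a = "classify the sentiment of this review"
parent_b = "please classify this review carefully"
child = genetics.crossover(parent_a, parent_b)   # picks one parent's token list at random
mutant = genetics.mutate(child, prob=0.3)        # resamples interior tokens from vocab_id
print(child, "->", mutant)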
class GeneticAlgorithmTrainer(BaseTrainer):
0
2023-10-08 12:39:44+00:00
16k
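The trainer whose first line appears as next_line above (class GeneticAlgorithmTrainer(BaseTrainer):) is not part of this record, so the loop below is only a generic sketch of how a genetic prompt search could combine the Genetics operators with a prompt-scoring function; it is not the repository's implementation, and the function name and parameters are hypothetical.

import random
from typing import Callable, List

def genetic_prompt_search(
    genetics,                          # a Genetics instance as defined above
    score_fn: Callable[[str], float],  # e.g. validation accuracy of a prompt
    seed_prompts: List[str],
    generations: int = 10,
    population_size: int = 16,
    mutate_prob: float = 0.1,
) -> str:
    """Hypothetical evolutionary loop over prompt strings."""
    population = list(seed_prompts)
    # pad the initial population with random strings
    while len(population) < population_size:
        population.append(genetics.random_string(length=5))

    for _ in range(generations):
        # keep the best half, refill with mutated crossovers of survivors
        survivors = sorted(population, key=score_fn, reverse=True)[: population_size // 2]
        children = []
        while len(survivors) + len(children) < population_size:
            p1, p2 = random.sample(survivors, 2)
            children.append(genetics.mutate(genetics.crossover(p1, p2), prob=mutate_prob))
        population = survivors + children

    return max(population, key=score_fn)

In the actual trainer the score would presumably come from PromptedClassificationReward.forward in "infer" mode, but any prompt-to-scalar function works for this sketch.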
clessig/atmorep
atmorep/core/trainer.py
[ { "identifier": "AtmoRep", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRep( torch.nn.Module) :\n\n def __init__(self, cf) :\n '''Constructor'''\n \n super( AtmoRep, self).__init__()\n\n self.cf = cf\n\n ###################################################\n def create( self, devices, load_pretrained=True) :\n '''Create network'''\n\n cf = self.cf\n self.devices = devices\n size_token_info = 6\n self.fields_coupling_idx = []\n\n self.fields_index = {}\n for ifield, field_info in enumerate(cf.fields) :\n self.fields_index[ field_info[0] ] = ifield \n \n # # embedding network for global/auxiliary token infos\n # TODO: only for backward compatibility, remove\n self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)\n torch.nn.init.constant_( self.embed_token_info.weight, 0.0)\n\n self.embeds_token_info = torch.nn.ModuleList()\n for ifield, field_info in enumerate( cf.fields) :\n \n self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net))\n \n if len(field_info[1]) > 4 and load_pretrained :\n # TODO: inconsistent with embeds_token_info -> version that can handle both\n # we could imply use the file name: embed_token_info vs embeds_token_info\n name = 'AtmoRep' + '_embed_token_info'\n mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1]))\n self.embeds_token_info[-1].load_state_dict( mloaded)\n print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) )\n else :\n # initalization\n torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0)\n self.embeds_token_info[-1].bias.data.fill_(0.0)\n\n # embedding and encoder\n\n self.embeds = torch.nn.ModuleList()\n self.encoders = torch.nn.ModuleList()\n self.masks = torch.nn.ParameterList()\n\n for field_idx, field_info in enumerate(cf.fields) : \n\n # learnabl class token\n if cf.learnable_mask :\n mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True))\n self.masks.append( mask.to(devices[0]))\n else :\n self.masks.append( None)\n\n # encoder\n self.encoders.append( TransformerEncoder( cf, field_idx, True).create())\n # load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained :\n self.load_block( field_info, 'encoder', self.encoders[-1])\n self.embeds.append( self.encoders[-1].embed)\n\n # indices of coupled fields for efficient access in forward\n self.fields_coupling_idx.append( [field_idx])\n for field_coupled in field_info[1][2] : \n if 'axial' in cf.encoder_att_type :\n self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] )\n else :\n for _ in range(cf.coupling_num_heads_per_field) :\n self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] )\n\n # decoder \n\n self.decoders = torch.nn.ModuleList()\n self.field_pred_idxs = []\n for field in cf.fields_prediction :\n\n for ifield, field_info in enumerate(cf.fields) : \n if field_info[0] == field[0] :\n self.field_pred_idxs.append( ifield)\n break\n\n self.decoders.append( TransformerDecoder( cf, field_info ) )\n # load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained :\n self.load_block( field_info, 'decoder', self.decoders[-1])\n\n # tail networks\n \n self.tails = torch.nn.ModuleList()\n for ifield, field in enumerate(cf.fields_prediction) :\n\n field_idx = self.field_pred_idxs[ifield]\n field_info = cf.fields[field_idx]\n self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create())\n # 
load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained:\n self.load_block( field_info, 'tail', self.tails[-1])\n\n # set devices\n\n for field_idx, field_info in enumerate(cf.fields) :\n # find determined device, use default if nothing specified\n device = self.devices[0]\n if len(field_info[1]) > 3 :\n assert field_info[1][3] < 4, 'Only single node model parallelism supported'\n assert field_info[1][3] < len(devices), 'Per field device id larger than max devices'\n device = self.devices[ field_info[1][3] ]\n # set device\n if self.masks[field_idx] != None :\n self.masks[field_idx].to(device)\n self.embeds[field_idx].to(device)\n self.encoders[field_idx].to(device)\n\n for field_idx, field in enumerate(cf.fields_prediction) :\n field_info = cf.fields[ self.field_pred_idxs[field_idx] ]\n device = self.devices[0]\n if len(field_info[1]) > 3 :\n device = self.devices[ field_info[1][3] ]\n self.decoders[field_idx].to(device)\n self.tails[field_idx].to(device)\n\n # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal\n self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove\n self.embeds_token_info.to(devices[0])\n\n self.checkpoint = identity\n if cf.grad_checkpointing :\n self.checkpoint = checkpoint_wrapper\n\n return self\n\n ###################################################\n def load_block( self, field_info, block_name, block ) :\n\n # name = self.__class__.__name__ + '_' + block_name + '_' + field_info[0]\n name = 'AtmoRep_' + block_name + '_' + field_info[0]\n\n b_loaded = torch.load( get_model_filename(name, field_info[1][4][0], field_info[1][4][1]))\n\n # in coupling mode, proj_out of attention heads needs separate treatment: only the pre-trained\n # part can be loaded\n keys_del = []\n for name, param in block.named_parameters():\n if 'proj_out' in name :\n for k in b_loaded.keys() :\n if name == k :\n if param.shape[0] != param.shape[1] : # non-square proj_out indicate deviation from pre-training\n with torch.no_grad() :\n # load pre-trained part\n param[ : , : b_loaded[k].shape[1] ] = b_loaded[k]\n # initalize remaining part to small random value\n param[ : , b_loaded[k].shape[1] : ] = 0.01 * torch.rand( param.shape[0],\n param.shape[1] - b_loaded[k].shape[1])\n keys_del += [ k ]\n for k in keys_del :\n del b_loaded[k]\n\n # use strict=False so that differing blocks, e.g. through coupling, are ignored\n mkeys, _ = block.load_state_dict( b_loaded, False)\n\n # missing keys = keys that are not pre-trained are initalized to small value\n [mkeys.remove(k) for k in keys_del] # remove proj_out keys so that they are not over-written\n [utils.init_weights_uniform( block.state_dict()[k], 0.01) for k in mkeys]\n\n print( 'Loaded {} for {} from id = {} (ignoring/missing {} elements).'.format( block_name,\n field_info[0], field_info[1][4][0], len(mkeys) ) )\n\n ###################################################\n @staticmethod\n def load( model_id, devices, cf = None, epoch = -2, load_pretrained=False) :\n '''Load network from checkpoint'''\n\n if not cf : \n cf = utils.Config()\n cf.load_json( model_id)\n\n model = AtmoRep( cf).create( devices, load_pretrained=False)\n mloaded = torch.load( utils.get_model_filename( model, model_id, epoch) )\n mkeys, _ = model.load_state_dict( mloaded, False )\n\n if len(mkeys) > 0 :\n print( f'Loaded AtmoRep: ignoring {len(mkeys)} elements: {mkeys}')\n\n # TODO: remove, only for backward \n if model.embeds_token_info[0].weight.abs().max() == 0. 
:\n model.embeds_token_info = torch.nn.ModuleList()\n\n return model\n \n ###################################################\n def save( self, epoch = -2) :\n '''Save network '''\n\n # save entire network\n torch.save( self.state_dict(), utils.get_model_filename( self, self.cf.wandb_id, epoch) )\n\n # save parts also separately\n\n # name = self.__class__.__name__ + '_embed_token_info'\n # torch.save( self.embed_token_info.state_dict(),\n # utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n name = self.__class__.__name__ + '_embeds_token_info'\n torch.save( self.embeds_token_info.state_dict(),\n utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, enc in enumerate(self.encoders) :\n name = self.__class__.__name__ + '_encoder_' + self.cf.fields[ifield][0]\n torch.save( enc.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, dec in enumerate(self.decoders) :\n name = self.__class__.__name__ + '_decoder_' + self.cf.fields_prediction[ifield][0]\n torch.save( dec.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, tail in enumerate(self.tails) :\n name = self.__class__.__name__ + '_tail_' + self.cf.fields_prediction[ifield][0]\n torch.save( tail.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n ###################################################\n def forward( self, xin) :\n '''Evaluate network'''\n\n # embedding\n cf = self.cf\n fields_embed = self.get_fields_embed(xin)\n \n # attention maps (if requested)\n atts = [ [] for _ in cf.fields ]\n\n # encoder\n embeds_layers = [[] for i in self.field_pred_idxs]\n for ib in range(self.cf.encoder_num_layers) :\n fields_embed, att = self.forward_encoder_block( ib, fields_embed) \n [embeds_layers[idx].append( fields_embed[i]) for idx,i in enumerate(self.field_pred_idxs)]\n [atts[i].append( att[i]) for i,_ in enumerate(cf.fields) ]\n \n # encoder-decoder coupling / token transformations\n (decoders_in, embeds_layers) = self.encoder_to_decoder( embeds_layers)\n\n preds = []\n for idx,i in enumerate(self.field_pred_idxs) :\n \n # decoder\n token_seq_embed, att = self.decoders[idx]( (decoders_in[idx], embeds_layers[idx]) )\n \n # tail net\n tail_in = self.decoder_to_tail( idx, token_seq_embed)\n pred = self.checkpoint( self.tails[idx], tail_in)\n \n preds.append( pred)\n [atts[i].append( a) for a in att]\n\n return preds, atts\n\n ###################################################\n def forward_encoder_block( self, iblock, fields_embed) :\n ''' evaluate one block (attention and mlp) '''\n\n # double buffer for commutation-invariant result (w.r.t evaluation order of transformers)\n fields_embed_cur, atts = [], []\n\n # attention heads\n for ifield in range( len(fields_embed)) :\n d = fields_embed[ifield].device\n fields_in =[fields_embed[i].to(d,non_blocking=True) for i in self.fields_coupling_idx[ifield]]\n # unpack list in argument for checkpointing\n y, att = self.checkpoint( self.encoders[ifield].heads[iblock], *fields_in)\n fields_embed_cur.append( y)\n atts.append( att)\n \n # MLPs \n for ifield in range( len(fields_embed)) :\n fields_embed_cur[ifield] = self.checkpoint( self.encoders[ifield].mlps[iblock], \n fields_embed_cur[ifield] )\n \n return fields_embed_cur, atts\n\n ###################################################\n \n def get_fields_embed( self, xin ) :\n cf = self.cf\n if 0 == len(self.embeds_token_info) : # TODO: only for backward compatibility, remove\n emb_net_ti = self.embed_token_info\n return 
[prepare_token( field_data, emb_net, emb_net_ti, cf.with_cls )\n for fidx,(field_data,emb_net) in enumerate(zip( xin, self.embeds))]\n else :\n embs_net_ti = self.embeds_token_info\n return [prepare_token( field_data, emb_net, embs_net_ti[fidx], cf.with_cls )\n for fidx,(field_data,emb_net) in enumerate(zip( xin, self.embeds))]\n \n ###################################################\n\n def get_attention( self, xin) : \n\n cf = self.cf\n attn = []\n fields_embed = self.get_fields_embed(xin)\n #either accumulated attention or last layer attention:\n blocks = list(range(self.cf.encoder_num_layers)) if cf.attention_mode == 'accum' else [self.cf.encoder_num_layers-1]\n for idx, ifield in enumerate(self.field_pred_idxs) : \n d = fields_embed[ifield].device\n fields_in =[fields_embed[i].to(d,non_blocking=True) for i in self.fields_coupling_idx[ifield]]\n attn_field = self.encoders[ifield].heads[blocks[0]].get_attention(fields_in)\n if cf.attention_mode == 'accum':\n for iblock in blocks[1:]:\n attn_layer = self.encoders[ifield].heads[iblock].get_attention(fields_in)\n attn_field = attn_field + attn_layer\n attn_field = torch.sum(attn_field, dim = 0, keepdim=True)\n attn.append(attn_field)\n# print(\"att FINAL\", ifield, len(attn), attn[0].shape)\n return attn" }, { "identifier": "AtmoRepData", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRepData( torch.nn.Module) :\n\n def __init__( self, net) :\n '''Wrapper class for AtmoRep that handles data loading'''\n\n super( AtmoRepData, self).__init__()\n \n self.data_loader_test = None\n self.data_loader_train = None\n self.data_loader_iter = None\n\n self.net = net\n\n # ensure that all data loaders have the same seed and hence load the same data\n self.rng_seed = net.cf.rng_seed \n if not self.rng_seed :\n self.rng_seed = int(torch.randint( 100000000, (1,))) \n \n ###################################################\n def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) :\n '''Load data'''\n\n cf = self.net.cf\n \n if batch_size < 0 :\n batch_size = cf.batch_size_max\n if num_loader_workers < 0 :\n num_loader_workers = cf.num_loader_workers\n\n if mode == NetMode.train :\n self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers)\n elif mode == NetMode.test :\n batch_size = cf.batch_size_test\n self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers)\n else : \n assert False\n\n ###################################################\n def _load_data( self, dataset, batch_size, num_loader_workers) :\n '''Private implementation for load'''\n\n dataset.load_data( batch_size)\n\n loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, \n 'num_workers': num_loader_workers, 'pin_memory': True}\n data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) \n\n return data_loader\n\n ###################################################\n def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) :\n\n cf = self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_data( times_pos, batch_size)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) :\n\n cf = 
self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_global( times, batch_size, cf.token_overlap)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, \n batch_size = -1, num_loader_workers = -1) :\n\n cf = self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def _set_data( self, dataset, mode : NetMode, batch_size = -1, loader_workers = -1) :\n '''Private implementation for set_data, set_global'''\n\n cf = self.net.cf\n if loader_workers < 0 :\n loader_workers = cf.num_loader_workers\n\n loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, \n 'num_workers': loader_workers, 'pin_memory': True}\n \n if mode == NetMode.train :\n self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, \n sampler = None)\n elif mode == NetMode.test :\n self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, \n sampler = None)\n else :\n assert False\n\n ###################################################\n def normalizer( self, field, vl_idx) :\n\n if isinstance( field, str) :\n for fidx, field_info in enumerate(self.cf.fields) :\n if field == field_info[0] :\n break\n assert fidx < len(self.cf.fields), 'invalid field'\n normalizer = self.dataset_train.datasets[fidx].normalizer\n\n elif isinstance( field, int) :\n normalizer = self.dataset_train.datasets[field][vl_idx].normalizer\n\n else :\n assert False, 'invalid argument type (has to be index to cf.fields or field name)'\n\n return normalizer\n\n ###################################################\n def mode( self, mode : NetMode) :\n \n if mode == NetMode.train :\n self.data_loader_iter = iter(self.data_loader_train)\n self.net.train()\n elif mode == NetMode.test :\n self.data_loader_iter = iter(self.data_loader_test)\n self.net.eval()\n else :\n assert False\n\n self.cur_mode = mode\n\n ###################################################\n def len( self, mode : NetMode) :\n if mode == NetMode.train :\n return len(self.data_loader_train)\n elif mode == NetMode.test :\n return len(self.data_loader_test)\n else :\n assert False\n\n ###################################################\n def next( self) :\n return next(self.data_loader_iter)\n\n ###################################################\n def forward( self, xin) :\n pred = self.net.forward( xin)\n return pred\n\n ###################################################\n def get_attention( self, xin): #, field_idx) :\n attn = self.net.get_attention( xin) #, field_idx)\n return attn\n\n ###################################################\n def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None,\n load_pretrained=True) :\n\n if create_net :\n self.net.create( devices, load_pretrained)\n\n self.pre_batch = pre_batch\n self.pre_batch_targets = pre_batch_targets\n\n cf = self.net.cf\n self.dataset_train = MultifieldDataSampler( cf.data_dir, 
cf.years_train, cf.fields,\n batch_size = cf.batch_size_start,\n num_t_samples = cf.num_t_samples,\n num_patches_per_t = cf.num_patches_per_t_train,\n num_load = cf.num_files_train,\n pre_batch = self.pre_batch,\n rng_seed = self.rng_seed,\n file_shape = cf.file_shape,\n smoothing = cf.data_smoothing,\n level_type = cf.level_type,\n file_format = cf.file_format,\n month = cf.month,\n time_sampling = cf.time_sampling,\n geo_range = cf.geo_range_sampling,\n fields_targets = cf.fields_targets,\n pre_batch_targets = self.pre_batch_targets )\n \n self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields,\n batch_size = cf.batch_size_test,\n num_t_samples = cf.num_t_samples,\n num_patches_per_t = cf.num_patches_per_t_test,\n num_load = cf.num_files_test,\n pre_batch = self.pre_batch,\n rng_seed = self.rng_seed,\n file_shape = cf.file_shape,\n smoothing = cf.data_smoothing,\n level_type = cf.level_type,\n file_format = cf.file_format,\n month = cf.month,\n time_sampling = cf.time_sampling,\n geo_range = cf.geo_range_sampling,\n lat_sampling_weighted = cf.lat_sampling_weighted,\n fields_targets = cf.fields_targets,\n pre_batch_targets = self.pre_batch_targets )\n\n return self" }, { "identifier": "prepare_batch_BERT_multifield", "path": "atmorep/training/bert.py", "snippet": "def prepare_batch_BERT_multifield( cf, rngs, fields, BERT_strategy, fields_data) :\n \n fields_tokens_masked_idx = [[] for _ in fields_data]\n fields_tokens_masked_idx_list = [[] for _ in fields_data]\n fields_targets = [[] for _ in fields_data]\n sources = [[] for _ in fields_data]\n token_infos = [[] for _ in fields_data]\n\n if not BERT_strategy :\n BERT_strategy = cf.BERT_strategy\n\n if BERT_strategy == 'BERT' :\n bert_f = prepare_batch_BERT_field\n elif BERT_strategy == 'forecast' :\n bert_f = prepare_batch_BERT_forecast_field\n elif BERT_strategy == 'temporal_interpolation' :\n bert_f = prepare_batch_BERT_temporal_field\n elif BERT_strategy == 'forecast_1shot' :\n bert_f = prepare_batch_BERT_forecast_field_1shot\n elif BERT_strategy == 'identity' :\n bert_f = prepare_batch_BERT_identity_field\n elif BERT_strategy == 'totalmask' :\n bert_f = prepare_batch_BERT_totalmask_field\n else :\n assert False\n\n # # advance randomly to avoid issues with parallel data loaders that naively duplicate rngs\n # delta = torch.randint( 0, 1000, (1,)).item()\n # [rng.bit_generator.advance( delta) for rng in rngs]\n\n if cf.BERT_window :\n # window size has to be multiple of two due to the variable token sizes (the size is \n # however currently restricted to differ by exactly a factor of two only)\n size_t = int(rngs[0].integers( 2, fields[0][3][0]+1, 1)[0] / 2.) * 2 \n size_lat = int(rngs[0].integers( 2, fields[0][3][1]+1, 1)[0] / 2.) * 2\n size_lon = int(rngs[0].integers( 2, fields[0][3][2]+1, 1)[0] / 2.) 
* 2\n\n rng_idx = 1\n for ifield, data_field in enumerate(fields_data) :\n for ilevel, (field_data, token_info) in enumerate(data_field) :\n\n tok_size = fields[ifield][4]\n field_data = tokenize( field_data, tok_size )\n field_data_shape = field_data.shape\n \n # cut neighborhood for current batch\n if cf.BERT_window :\n # adjust size based on token size so that one has a fixed size window in physical space\n cur_size_t = int(size_t * fields[ifield][3][0] / fields[0][3][0])\n cur_size_lat = int(size_lat * fields[ifield][3][1] / fields[0][3][1])\n cur_size_lon = int(size_lon * fields[ifield][3][2] / fields[0][3][2])\n # define indices\n idx_t_s = field_data.shape[1] - cur_size_t\n idx_lat_s = field_data.shape[2] - cur_size_lat\n idx_lon_s = field_data.shape[3] - cur_size_lon\n # cut\n field_data = field_data[ :, idx_t_s:, idx_lat_s:, idx_lon_s:]\n field_data = field_data.contiguous()\n # for token info first recover space-time shape\n token_info = token_info.reshape( list(field_data_shape[0:4]) + [token_info.shape[-1]]) \n token_info = token_info[ :, idx_t_s:, idx_lat_s:, idx_lon_s:]\n token_info = torch.flatten( token_info, 1, -2)\n token_info = token_info.contiguous()\n \n # no masking for static fields or if masking rate = 0\n if fields[ifield][1][0] > 0 and fields[ifield][5][0] > 0. :\n\n ret = bert_f( cf, ifield, field_data, token_info, rngs[rng_idx])\n (field_data, token_info, target, tokens_masked_idx, tokens_masked_idx_list) = ret\n \n if target != None :\n fields_targets[ifield].append( target)\n fields_tokens_masked_idx[ifield].append( tokens_masked_idx)\n fields_tokens_masked_idx_list[ifield].append( tokens_masked_idx_list)\n\n rng_idx += 1\n\n sources[ifield].append( field_data.unsqueeze(1) )\n token_infos[ifield].append( token_info )\n\n # merge along vertical level\n sources[ifield] = torch.cat( sources[ifield], 1)\n token_infos[ifield] = torch.cat( token_infos[ifield], 1)\n # merge along vertical level, for target we have level, batch, ... ordering \n fields_targets[ifield] = torch.cat( fields_targets[ifield],0) \\\n if len(fields_targets[ifield]) > 0 else fields_targets[ifield]\n\n return (sources, token_infos, fields_targets, fields_tokens_masked_idx,\n fields_tokens_masked_idx_list)" }, { "identifier": "positional_encoding_harmonic", "path": "atmorep/transformer/transformer_base.py", "snippet": "def positional_encoding_harmonic( x, num_levels, num_tokens, with_cls = False) :\n '''space time harmonic positional encoding'''\n\n dim_embed = x.shape[-1]\n dev = x.get_device()\n \n # num_tokens = x.shape[-3:-1]\n # len_token_seq = num_levels * np.prod(num_tokens)\n # pe = torch.zeros( len_token_seq, dim_embed, device=dev)\n # position = torch.arange( 0, len_token_seq).unsqueeze(1)\n # div = torch.exp(torch.arange( 0, dim_embed, 2) * -(math.log(1000) / dim_embed))\n\n # pe[:, 0::2] = torch.sin(position * div)\n # pe[:, 1::2] = torch.cos(position * div)\n # pe = pe.unsqueeze(0)\n\n # x += pe.reshape( x[0].shape )\n\n\n idx = torch.arange( np.prod( x.shape[1:-1]), device=dev)\n num_tokens_t_lat_lon = np.prod( num_tokens)\n num_tokens_lat_lon = num_tokens[1] * num_tokens[2]\n idxs_v = (idx / num_tokens_t_lat_lon).int()\n # idxs_v = num_tokens_t_lat_lon\n temp = torch.remainder( idx, num_tokens_t_lat_lon)\n idxs_t = (temp / num_tokens_lat_lon).int()\n temp = torch.remainder( idx, num_tokens_lat_lon)\n idxs_lat = (temp / num_tokens[1]).int()\n idxs_lon = torch.remainder( temp, num_tokens[2])\n\n pe = torch.zeros( idx.shape[0], dim_embed, device=dev)\n xs = (2. 
* np.pi * torch.arange( 0, dim_embed, 2, device=dev) / dim_embed)\n pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * idxs_lat, xs) ) \\\n + torch.sin( torch.outer( idxs_t, xs) )\n pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * idxs_lon, xs) ) \\\n + torch.cos( torch.outer( idxs_v , xs) )\n if with_cls :\n x[:,1:] += pe.reshape( x[0,1:].shape)\n else :\n x += pe.reshape( x[0].shape)\n\n return x" }, { "identifier": "shape_to_str", "path": "atmorep/utils/utils.py", "snippet": "def shape_to_str( shape) :\n ret ='{}'.format( list(shape)).replace(' ', '').replace(',','_').replace('(','s_').replace(')','')\n ret = ret.replace('[','s_').replace(']','')\n return ret" }, { "identifier": "relMSELoss", "path": "atmorep/utils/utils.py", "snippet": "def relMSELoss( pred, target = None) :\n val = torch.mean( (pred - target) * (pred - target)) / torch.mean( target * target)\n return val" }, { "identifier": "Gaussian", "path": "atmorep/utils/utils.py", "snippet": "def Gaussian( x, mu=0., std_dev=1.) :\n # return (1 / (std_dev*np.sqrt(2.*np.pi))) * torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev))\n # unnormalized Gaussian where maximum is one\n return torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev))" }, { "identifier": "CRPS", "path": "atmorep/utils/utils.py", "snippet": "def CRPS( y, mu, std_dev) :\n # see Eq. A2 in S. Rasp and S. Lerch. Neural networks for postprocessing ensemble weather forecasts. Monthly Weather Review, 146(11):3885 – 3900, 2018.\n c1 = np.sqrt(1./np.pi)\n t1 = 2. * erf( (y-mu) / std_dev) - 1.\n t2 = 2. * Gaussian( (y-mu) / std_dev)\n val = std_dev * ( (y-mu)/std_dev * t1 + t2 - c1 )\n return val" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "identifier": "sgn_exp", "path": "atmorep/utils/utils.py", "snippet": "def sgn_exp( x ) :\n '''exponential preserving sign'''\n return x.sign() * (torch.exp( x.abs() ) - 1.)" }, { "identifier": "write_forecast", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_forecast( model_id, epoch, batch_idx, levels, sources, sources_coords,\n targets, targets_coords,\n preds, ensembles,\n zarr_store_type = 'ZipStore' ) :\n ''' \n sources : num_fields x [field name , data]\n targets :\n preds, ensemble share coords with targets\n '''\n\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_source = zarr_store( fname.format( 'source'))\n exp_source = zarr.group(store=store_source)\n for fidx, field in enumerate(sources) :\n ds_field = exp_source.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=sources_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=sources_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=sources_coords[2][bidx])\n store_source.close()\n\n store_target = zarr_store( fname.format( 'target'))\n exp_target = zarr.group(store=store_target)\n for fidx, field in enumerate(targets) :\n ds_field = exp_target.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = 
ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_target.close()\n\n store_pred = zarr_store( fname.format( 'pred'))\n exp_pred = zarr.group(store=store_pred)\n for fidx, field in enumerate(preds) :\n ds_field = exp_pred.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_pred.close()\n\n store_ens = zarr_store( fname.format( 'ens'))\n exp_ens = zarr.group(store=store_ens)\n for fidx, field in enumerate(ensembles) :\n ds_field = exp_ens.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_ens.close()" }, { "identifier": "write_BERT", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_BERT( model_id, epoch, batch_idx, levels, sources, sources_coords,\n targets, targets_coords,\n preds, ensembles,\n zarr_store_type = 'ZipStore' ) :\n '''\n sources : num_fields x [field name , data]\n targets :\n preds, ensemble share coords with targets\n '''\n\n # fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch}.zarr'\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_source = zarr_store( fname.format( 'source'))\n exp_source = zarr.group(store=store_source)\n for fidx, field in enumerate(sources) :\n ds_field = exp_source.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels[fidx])\n ds_batch_item.create_dataset( 'datetime', data=sources_coords[0][0][bidx])\n ds_batch_item.create_dataset( 'lat', data=sources_coords[1][0][bidx])\n ds_batch_item.create_dataset( 'lon', data=sources_coords[2][0][bidx])\n store_source.close()\n\n store_target = zarr_store( fname.format( 'target'))\n exp_target = zarr.group(store=store_target)\n for fidx, field in enumerate(targets) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_field = exp_target.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n 
ds_target_b = ds_field.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_target_b_l = ds_target_b.require_group( f'ml={levels[fidx][vidx]}')\n ds_target_b_l.create_dataset( 'data', data=field[1][vidx][bidx])\n ds_target_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_target_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_target_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_target_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_target.close()\n\n store_pred = zarr_store( fname.format( 'pred'))\n exp_pred = zarr.group(store=store_pred)\n for fidx, field in enumerate(preds) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_pred = exp_pred.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n ds_pred_b = ds_pred.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_pred_b_l = ds_pred_b.create_group( f'ml={levels[fidx][vidx]}')\n ds_pred_b_l.create_dataset( 'data', data\n =field[1][vidx][bidx])\n ds_pred_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_pred_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_pred_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_pred_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_pred.close()\n\n store_ens = zarr_store( fname.format( 'ens'))\n exp_ens = zarr.group(store=store_ens)\n for fidx, field in enumerate(ensembles) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_ens = exp_ens.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n ds_ens_b = ds_ens.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_ens_b_l = ds_ens_b.create_group( f'ml={levels[fidx][vidx]}')\n ds_ens_b_l.create_dataset( 'data', data=field[1][vidx][bidx])\n ds_ens_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_ens_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_ens_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_ens_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_ens.close()" }, { "identifier": "write_attention", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_attention(model_id, epoch, batch_idx, levels, attn, attn_coords, zarr_store_type = 'ZipStore' ) :\n\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_attn = zarr_store( fname.format( 'attention'))\n exp_attn = zarr.group(store=store_attn)\n\n for fidx, atts_f in enumerate(attn) :\n ds_field = exp_attn.require_group( f'{atts_f[0]}')\n ds_field_b = ds_field.require_group( f'batch={batch_idx:05d}')\n for lidx, atts_f_l in enumerate(atts_f[1]) : # layer in the network\n ds_f_l = ds_field_b.require_group( f'layer={lidx:05d}')\n ds_f_l.create_dataset( 'ml', data=levels[fidx])\n ds_f_l.create_dataset( 'datetime', data=attn_coords[0][fidx])\n ds_f_l.create_dataset( 'lat', data=attn_coords[1][fidx])\n ds_f_l.create_dataset( 'lon', data=attn_coords[2][fidx])\n ds_f_l_h = ds_f_l.require_group('heads')\n for hidx, atts_f_l_head in enumerate(atts_f_l) : # number of attention head\n if atts_f_l_head != None :\n 
ds_f_l_h.create_dataset(f'{hidx}', data=atts_f_l_head.numpy() )\n store_attn.close()" } ]
import torch
import torchinfo
import numpy as np
import code
import os
import datetime
import functools
import pandas as pd
import wandb
import torch.distributed as dist
import torch.utils.data.distributed
import atmorep.config.config as config
import atmorep.utils.token_infos_transformations as token_infos_transformations
import atmorep.utils.utils as utils
from pathlib import Path
from typing import TypeVar
from torch.distributed.optim import ZeroRedundancyOptimizer
from atmorep.core.atmorep_model import AtmoRep
from atmorep.core.atmorep_model import AtmoRepData
from atmorep.training.bert import prepare_batch_BERT_multifield
from atmorep.transformer.transformer_base import positional_encoding_harmonic
from atmorep.utils.utils import shape_to_str
from atmorep.utils.utils import relMSELoss
from atmorep.utils.utils import Gaussian
from atmorep.utils.utils import CRPS
from atmorep.utils.utils import NetMode
from atmorep.utils.utils import sgn_exp
from atmorep.datasets.data_writer import write_forecast, write_BERT, write_attention
13635
if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entroy loss for continuous distributions if 'stats' in self.cf.losses :
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class Trainer_Base() : def __init__( self, cf, devices ) : self.cf = cf self.devices = devices self.device_in = devices[0] self.device_out = devices[-1] self.fields_prediction_idx = [] self.loss_weights = torch.zeros( len(cf.fields_prediction) ) for ifield, field in enumerate(cf.fields_prediction) : self.loss_weights[ifield] = self.cf.fields_prediction[ifield][1] for idx, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.fields_prediction_idx.append( idx) break self.loss_weights = self.loss_weights.to( self.device_out) self.MSELoss = torch.nn.MSELoss() # transformation for token infos if hasattr( cf, 'token_infos_transformation') : self.tok_infos_trans = getattr( token_infos_transformations, cf.token_infos_transformation) else : self.tok_infos_trans = getattr( token_infos_transformations, 'identity') if 0 == cf.par_rank : directory = Path( config.path_results, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) directory = Path( config.path_models, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) ################################################### def create( self, load_embeds=True) : net = AtmoRep( self.cf) self.model = AtmoRepData( net) self.model.create( self.pre_batch, self.devices, load_embeds) # TODO: pass the properly to model / net self.model.net.encoder_to_decoder = self.encoder_to_decoder self.model.net.decoder_to_tail = self.decoder_to_tail return self ################################################### @classmethod def load( Typename, cf, model_id, epoch, devices) : trainer = Typename( cf, devices).create( load_embeds=False) trainer.model.net = trainer.model.net.load( model_id, devices, cf, epoch) # TODO: pass the properly to model / net trainer.model.net.encoder_to_decoder = trainer.encoder_to_decoder trainer.model.net.decoder_to_tail = trainer.decoder_to_tail str = 'Loaded model id = {}{}.'.format( model_id, f' at epoch = {epoch}' if epoch> -2 else '') print( str) return trainer ################################################### def save( self, epoch) : self.model.net.save( epoch) ################################################### def get_learn_rates( self) : cf = self.cf size_padding = 5 learn_rates = np.zeros( cf.num_epochs + size_padding) learn_rates[:cf.lr_start_epochs] = np.linspace( cf.lr_start, cf.lr_max, num = cf.lr_start_epochs) lr = learn_rates[cf.lr_start_epochs-1] ic = 0 for epoch in range( cf.lr_start_epochs, cf.num_epochs + size_padding) : lr = max( lr / cf.lr_decay_rate, cf.lr_min) learn_rates[epoch] = lr if ic > 9999 : # sanity check assert "Maximum number of epochs exceeded." 
return learn_rates ################################################### def run( self, epoch = -1) : cf = self.cf model = self.model learn_rates = self.get_learn_rates() if cf.with_ddp : self.model_ddp = torch.nn.parallel.DistributedDataParallel( model, static_graph=True) if not cf.optimizer_zero : self.optimizer = torch.optim.AdamW( self.model_ddp.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) else : self.optimizer = ZeroRedundancyOptimizer(self.model_ddp.parameters(), optimizer_class=torch.optim.AdamW, lr=cf.lr_start ) else : self.optimizer = torch.optim.AdamW( self.model.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) if 0 == cf.par_rank : # print( self.model.net) model_parameters = filter(lambda p: p.requires_grad, self.model_ddp.parameters()) num_params = sum([np.prod(p.size()) for p in model_parameters]) print( f'Number of trainable parameters: {num_params:,}') # test at the beginning as reference self.model.load_data( NetMode.test, batch_size=cf.batch_size_test) if cf.test_initial : cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() test_loss = np.array( [cur_test_loss]) else : # generic value based on data normalization test_loss = np.array( [1.0]) epoch += 1 batch_size = cf.batch_size_start - cf.batch_size_delta if cf.profile : lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr self.model.load_data( NetMode.train, batch_size = cf.batch_size_max) self.profile() # training loop while True : if epoch >= cf.num_epochs : break lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr batch_size = min( cf.batch_size_max, batch_size + cf.batch_size_delta) tstr = datetime.datetime.now().strftime("%H:%M:%S") print( '{} : {} :: batch_size = {}, lr = {}'.format( epoch, tstr, batch_size, lr) ) self.model.load_data( NetMode.train, batch_size = batch_size) self.train( epoch) if cf.with_wandb and 0 == cf.par_rank : self.save( epoch) cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() # self.validate( epoch, 'forecast') # save model if cur_test_loss < test_loss.min() : self.save( -2) test_loss = np.append( test_loss, [cur_test_loss]) epoch += 1 tstr = datetime.datetime.now().strftime("%H:%M:%S") print( 'Finished training at {} with test loss = {}.'.format( tstr, test_loss[-1]) ) # save final network if cf.with_wandb and 0 == cf.par_rank : self.save( -2) ################################################### def train( self, epoch): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() loss_total = [[] for i in range(len(cf.losses)) ] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] mse_loss_total = [] grad_loss_total = [] ctr = 0 for batch_idx in range( model.len( NetMode.train)) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() loss.backward() self.optimizer.step() [loss_total[idx].append( losses[key]) for idx, key in enumerate(losses)] mse_loss_total.append( mse_loss.detach().cpu() ) grad_loss_total.append( loss.detach().cpu() ) [std_dev_total[idx].append( pred[1].detach().cpu()) for idx, pred in enumerate(preds)] # logging if int((batch_idx * cf.batch_size_max) / 4) > ctr : # wandb logging if cf.with_wandb and (0 == cf.par_rank) : loss_dict = { "training loss": torch.mean( torch.tensor( mse_loss_total)), "gradient loss": torch.mean( torch.tensor( grad_loss_total)) } # log individual loss terms for 
individual fields for idx, cur_loss in enumerate(loss_total) : loss_name = self.cf.losses[idx] lt = torch.tensor(cur_loss) for i, field in enumerate(cf.fields_prediction) : idx_name = loss_name + ', ' + field[0] idx_std_name = 'stddev, ' + field[0] loss_dict[idx_name] = torch.mean( lt[:,i]).cpu().detach() loss_dict[idx_std_name] = torch.mean(torch.cat(std_dev_total[i],0)).cpu().detach() wandb.log( loss_dict ) # console output print('train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:1.5f} : {:1.5f} :: {:1.5f}'.format( epoch, batch_idx, model.len( NetMode.train), 100. * batch_idx/model.len(NetMode.train), torch.mean( torch.tensor( grad_loss_total)), torch.mean(torch.tensor(mse_loss_total)), torch.mean( preds[0][1]) ), flush=True) # save model (use -2 as epoch to indicate latest, stored without epoch specification) # self.save( -2) # reset loss_total = [[] for i in range(len(cf.losses)) ] mse_loss_total = [] grad_loss_total = [] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] ctr += 1 # save gradients if cf.save_grads and cf.with_wandb and (0 == cf.par_rank) : dir_name = './grads/id{}'.format( cf.wandb_id) if not os.path.exists(dir_name): os.makedirs(dir_name) rmsprop_ws = [] for k in range( len(self.optimizer.state_dict()['state']) ) : rmsprop_ws.append(self.optimizer.state_dict()['state'][k]['exp_avg_sq'].mean().unsqueeze(0)) rmsprop_ws = torch.cat( rmsprop_ws) fname = '{}/{}_epoch{}_rmsprop.npy'.format( dir_name, cf.wandb_id, epoch) np.save( fname, rmsprop_ws.cpu().detach().numpy() ) idx = 0 for name, param in self.model.named_parameters(): if param.requires_grad : fname = '{}/{}_epoch{}_{:05d}_{}_grad.npy'.format( dir_name, cf.wandb_id, epoch, idx,name) np.save( fname, param.grad.cpu().detach().numpy() ) idx += 1 # clean memory self.optimizer.zero_grad() del batch_data, loss, loss_total, mse_loss_total, grad_loss_total, std_dev_total ################################################### def profile( self): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() # See https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html # for details on how to load and analyse report # https://pytorch.org/blog/trace-analysis-for-masses/ # do for all par_ranks to avoid that they run out of sync print( '---------------------------------') print( 'Profiling:') pname = './logs/profile_par_rank' + str(cf.par_rank) + '_' + cf.wandb_id + '/profile' with torch.profiler.profile( activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(pname), profile_memory=True, record_shapes=True, with_stack=True) as prof: for batch_idx in range( 2 * (1+1+3) ) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() # loss.backward() # self.optimizer.step() prof.step() print( 'Profiling finished.') print( '---------------------------------') ################################################### def validate( self, epoch, BERT_test_strategy = 'BERT'): cf = self.cf BERT_strategy_train = cf.BERT_strategy cf.BERT_strategy = BERT_test_strategy self.model.mode( NetMode.test) total_loss = 0. 
total_losses = torch.zeros( len(self.fields_prediction_idx) ) test_len = 0 self.mode_test = True # run in training mode offset = 0 if -1 == epoch and 0 == cf.par_rank : if 1 == cf.num_accs_per_task : # bug in torchinfo; fixed in v1.8.0 offset += 1 print( 'Network size:') batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) torchinfo.summary( self.model, input_data=[batch_data]) # run test set evaluation with torch.no_grad() : for it in range( self.model.len( NetMode.test) - offset) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : if type(batch_data[1][0][0]) is list : targets = [batch_data[1][i][0][0] for i in range( len(batch_data[1]))] else : targets = batch_data[1][0] # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) loss = torch.tensor( 0.) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] # hook for custom test loss self.test_loss( pred, target) # base line loss cur_loss = self.MSELoss( pred[0], target = target ).cpu().item() loss += cur_loss total_losses[ifield] += cur_loss ifield += 1 total_loss += loss test_len += 1 # store detailed results on current test set for book keeping if cf.par_rank < cf.log_test_num_ranks : log_preds = [[p.detach().clone().cpu() for p in pred] for pred in preds] self.log_validate( epoch, it, log_sources, log_preds) if cf.attention: self.log_attention( epoch, it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes total_loss /= test_len * len(self.cf.fields_prediction) total_losses /= test_len if cf.with_ddp : total_loss_cuda = total_loss.cuda() total_losses_cuda = total_losses.cuda() dist.all_reduce( total_loss_cuda, op=torch.distributed.ReduceOp.AVG ) dist.all_reduce( total_losses_cuda, op=torch.distributed.ReduceOp.AVG ) total_loss = total_loss_cuda.cpu() total_losses = total_losses_cuda.cpu() if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) 
with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entroy loss for continuous distributions if 'stats' in self.cf.losses :
stats_loss = Gaussian( target, pred[0], pred[1])
6
2023-10-09 19:42:46+00:00
16k
NKI-AI/ahcore
ahcore/callbacks/wsi_metric_callback.py
[ { "identifier": "WriteH5Callback", "path": "ahcore/callbacks/h5_callback.py", "snippet": "class WriteH5Callback(Callback):\n def __init__(\n self,\n max_queue_size: int,\n max_concurrent_writers: int,\n dump_dir: Path,\n normalization_type: str = str(NormalizationType.LOGITS),\n precision: str = str(InferencePrecision.FP32),\n ):\n \"\"\"\n Callback to write predictions to H5 files. This callback is used to write whole-slide predictions to single H5\n files in a separate thread.\n\n TODO:\n - Add support for distributed data parallel\n\n Parameters\n ----------\n max_queue_size : int\n The maximum number of items to store in the queue (i.e. tiles).\n max_concurrent_writers : int\n The maximum number of concurrent writers.\n dump_dir : pathlib.Path\n The directory to dump the H5 files to.\n normalization_type : str\n The normalization type to use for the predictions. One of \"sigmoid\", \"softmax\" or \"logits\".\n precision : str\n The precision to use for the predictions. One of \"float16\", \"float32\" or \"uint8\".\n \"\"\"\n super().__init__()\n self._writers: dict[str, _WriterMessage] = {}\n self._current_filename = None\n self._dump_dir = Path(dump_dir)\n self._max_queue_size = max_queue_size\n self._semaphore = Semaphore(max_concurrent_writers)\n self._dataset_index = 0\n self._normalization_type: NormalizationType = NormalizationType(normalization_type)\n self._precision: InferencePrecision = InferencePrecision(precision)\n\n self._logger = get_logger(type(self).__name__)\n\n @property\n def dump_dir(self) -> Path:\n return self._dump_dir\n\n def __process_management(self) -> None:\n \"\"\"\n Handle the graceful termination of multiple processes at the end of h5 writing.\n This block ensures proper release of resources allocated during multiprocessing.\n\n Returns\n -------\n None\n \"\"\"\n assert self._current_filename, \"_current_filename shouldn't be None here\"\n\n self._writers[self._current_filename][\"queue\"].put(None)\n self._writers[self._current_filename][\"process\"].join()\n self._writers[self._current_filename][\"process\"].close()\n self._writers[self._current_filename][\"queue\"].close()\n\n @property\n def writers(self) -> dict[str, _WriterMessage]:\n return self._writers\n\n def _batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n stage: str,\n dataloader_idx: int = 0,\n ) -> None:\n filename = batch[\"path\"][0] # Filenames are constant across the batch.\n if any([filename != path for path in batch[\"path\"]]):\n raise ValueError(\n \"All paths in a batch must be the same. 
\"\n \"Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler.\"\n )\n\n if filename != self._current_filename:\n output_filename = _get_h5_output_filename(\n self.dump_dir,\n filename,\n model_name=str(pl_module.name),\n step=pl_module.global_step,\n )\n output_filename.parent.mkdir(parents=True, exist_ok=True)\n link_fn = (\n self.dump_dir / \"outputs\" / f\"{pl_module.name}\" / f\"step_{pl_module.global_step}\" / \"image_h5_link.txt\"\n )\n with open(link_fn, \"a\" if link_fn.is_file() else \"w\") as file:\n file.write(f\"{filename},{output_filename}\\n\")\n\n self._logger.debug(\"%s -> %s\", filename, output_filename)\n if self._current_filename is not None:\n self.__process_management()\n self._semaphore.release()\n\n self._semaphore.acquire()\n\n if stage == \"validate\":\n total_dataset: ConcatDataset = trainer.datamodule.validate_dataset # type: ignore\n elif stage == \"predict\":\n total_dataset: ConcatDataset = trainer.predict_dataloaders.dataset # type: ignore\n else:\n raise NotImplementedError(f\"Stage {stage} is not supported for {self.__class__.__name__}.\")\n\n current_dataset: TiledWsiDataset\n current_dataset, _ = total_dataset.index_to_dataset(self._dataset_index) # type: ignore\n slide_image = current_dataset.slide_image\n\n data_description: DataDescription = pl_module.data_description # type: ignore\n inference_grid: GridDescription = data_description.inference_grid\n\n mpp = inference_grid.mpp\n if mpp is None:\n mpp = slide_image.mpp\n\n _, size = slide_image.get_scaled_slide_bounds(slide_image.get_scaling(mpp))\n num_samples = len(current_dataset)\n\n # Let's get the data_description, so we can figure out the tile size and things like that\n tile_size = inference_grid.tile_size\n tile_overlap = inference_grid.tile_overlap\n\n # TODO: We are really putting strange things in the Queue if we may believe mypy\n new_queue: Queue[Any] = Queue() # pylint: disable=unsubscriptable-object\n parent_conn, child_conn = Pipe()\n new_writer = H5FileImageWriter(\n output_filename,\n size=size,\n mpp=mpp,\n tile_size=tile_size,\n tile_overlap=tile_overlap,\n num_samples=num_samples,\n color_profile=None,\n is_compressed_image=False,\n progress=None,\n precision=InferencePrecision(self._precision),\n )\n new_process = Process(target=new_writer.consume, args=(self.generator(new_queue), child_conn))\n new_process.start()\n self._writers[filename] = {\n \"queue\": new_queue,\n \"writer\": new_writer,\n \"process\": new_process,\n \"connection\": parent_conn,\n }\n self._current_filename = filename\n\n prediction = outputs[\"prediction\"]\n prediction = NormalizationType.normalize(self._normalization_type)(prediction).detach().cpu().numpy()\n coordinates_x, coordinates_y = batch[\"coordinates\"]\n coordinates = torch.stack([coordinates_x, coordinates_y]).T.detach().cpu().numpy()\n self._writers[filename][\"queue\"].put((coordinates, prediction))\n self._dataset_index += prediction.shape[0]\n\n def _epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n if self._current_filename is not None:\n self.__process_management()\n self._semaphore.release()\n self._dataset_index = 0\n # Reset current filename to None for correct execution of subsequent validation loop\n self._current_filename = None\n # Clear all the writers from the current epoch\n self._writers = {}\n\n def on_validation_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int = 0,\n ) -> None:\n 
self._batch_end(trainer, pl_module, outputs, batch, batch_idx, \"validate\", dataloader_idx)\n\n def on_predict_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int = 0,\n ) -> None:\n self._batch_end(trainer, pl_module, outputs, batch, batch_idx, \"predict\", dataloader_idx)\n\n def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n self._epoch_end(trainer, pl_module)\n\n def on_predict_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n self._epoch_end(trainer, pl_module)\n\n @staticmethod\n def generator(\n queue: Queue[Optional[GenericArray]], # pylint: disable=unsubscriptable-object\n ) -> Generator[GenericArray, None, None]:\n while True:\n batch = queue.get()\n if batch is None:\n break\n yield batch" }, { "identifier": "AhCoreLightningModule", "path": "ahcore/lit_module.py", "snippet": "class AhCoreLightningModule(pl.LightningModule):\n RELEVANT_KEYS = [\n \"coordinates\",\n \"mpp\",\n \"path\",\n \"region_index\",\n \"grid_local_coordinates\",\n \"grid_index\",\n ]\n\n def __init__(\n self,\n model: nn.Module,\n optimizer: torch.optim.Optimizer, # noqa\n data_description: DataDescription,\n loss: nn.Module | None = None,\n augmentations: dict[str, nn.Module] | None = None,\n metrics: dict[str, MetricFactory | WSIMetricFactory] | None = None,\n scheduler: torch.optim.lr_scheduler.LRScheduler | None = None, # noqa\n ):\n super().__init__()\n\n self.save_hyperparameters(\n logger=False,\n ignore=[\n \"model\",\n \"augmentations\",\n \"metrics\",\n \"data_description\",\n \"loss\",\n ],\n ) # TODO: we should send the hyperparams to the logger elsewhere\n\n self._num_classes = data_description.num_classes\n self._model = model(out_channels=self._num_classes)\n self._augmentations = augmentations\n\n self._loss = loss\n if metrics is not None:\n tile_metric = metrics.get(\"tile_level\")\n wsi_metric = metrics.get(\"wsi_level\", None)\n if tile_metric is not None and not isinstance(tile_metric, MetricFactory):\n raise ConfigurationError(\"Tile metrics must be of type MetricFactory\")\n if wsi_metric is not None and not isinstance(wsi_metric, WSIMetricFactory):\n raise ConfigurationError(\"WSI metrics must be of type WSIMetricFactory\")\n\n self._tile_metric = tile_metric\n self._wsi_metrics = wsi_metric\n\n self._data_description = data_description\n\n @property\n def wsi_metrics(self) -> WSIMetricFactory | None:\n return self._wsi_metrics\n\n @property\n def name(self) -> str:\n return str(self._model.__class__.__name__)\n\n def forward(self, sample: torch.Tensor) -> Any:\n \"\"\"This function is only used during inference\"\"\"\n self._model.eval()\n return self._model.forward(sample)\n\n @property\n def data_description(self) -> DataDescription:\n return self._data_description\n\n def _compute_metrics(\n self,\n prediction: torch.Tensor,\n target: torch.Tensor,\n roi: torch.Tensor | None,\n stage: TrainerFn | str,\n ) -> dict[str, torch.Tensor]:\n if not self._tile_metric:\n return {}\n\n _stage = stage.value if isinstance(stage, TrainerFn) else stage\n metrics = {f\"{_stage}/{k}\": v for k, v in self._tile_metric(prediction, target, roi).items()}\n return metrics\n\n def do_step(self, batch: DlupDatasetSample, batch_idx: int, stage: TrainerFn | str) -> LitModuleSample:\n if self._augmentations and stage in self._augmentations:\n batch = self._augmentations[stage](batch)\n\n if self._loss is None:\n raise RuntimeError(\n 
f\"Loss is not defined for {self.__class__.__name__}. \"\n f\"This is required during training and validation\"\n )\n\n _target = batch[\"target\"]\n # Batch size is required for accurate loss calculation and logging\n batch_size = batch[\"image\"].shape[0]\n # ROIs can reduce the usable area of the inputs, the loss should be scaled appropriately\n roi = batch.get(\"roi\", None)\n\n if stage == \"fit\":\n _prediction = self._model(batch[\"image\"])\n batch[\"prediction\"] = _prediction\n else:\n batch = {**batch, **self._get_inference_prediction(batch[\"image\"])}\n _prediction = batch[\"prediction\"]\n\n loss = self._loss(_prediction, _target, roi)\n\n # The relevant_dict contains values to know where the tiles originate.\n _relevant_dict = {k: v for k, v in batch.items() if k in self.RELEVANT_KEYS}\n _metrics = self._compute_metrics(_prediction, _target, roi, stage=stage)\n _loss = loss.mean()\n # TODO: This can be a TypedDict\n output = {\n \"loss\": _loss,\n \"loss_per_sample\": loss.clone().detach(),\n \"metrics\": _metrics,\n **_relevant_dict,\n }\n if stage != \"fit\":\n output[\"prediction\"] = _prediction\n\n _stage = stage.value if isinstance(stage, TrainerFn) else stage\n\n self.log(\n f\"{_stage}/loss\",\n _loss,\n batch_size=batch_size,\n sync_dist=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n # Log the metrics\n self.log_dict(\n _metrics,\n batch_size=batch_size,\n sync_dist=True,\n prog_bar=False,\n on_epoch=True,\n on_step=False,\n )\n\n return output\n\n def _get_inference_prediction(self, _input: torch.Tensor) -> dict[str, torch.Tensor]:\n output = {}\n output[\"prediction\"] = self._model(_input)\n return output\n\n def training_step(self, batch: dict[str, Any], batch_idx: int) -> dict[str, Any]:\n output = self.do_step(batch, batch_idx, stage=\"fit\")\n return output\n\n def validation_step(self, batch: dict[str, Any], batch_idx: int) -> dict[str, Any]:\n output = self.do_step(batch, batch_idx, stage=\"validate\")\n\n # This is a sanity check. We expect the filenames to be constant across the batch.\n filename = batch[\"path\"][0]\n if any([filename != f for f in batch[\"path\"]]):\n raise ValueError(\"Filenames are not constant across the batch.\")\n return output\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n if self._augmentations and \"predict\" in self._augmentations:\n batch = self._augmentations[\"predict\"](batch)\n\n _relevant_dict = {k: v for k, v in batch.items() if k in self.RELEVANT_KEYS}\n batch = {**batch, **self._get_inference_prediction(batch[\"image\"])}\n _prediction = batch[\"prediction\"]\n output = {\"prediction\": _prediction, **_relevant_dict}\n\n # This is a sanity check. 
We expect the filenames to be constant across the batch.\n filename = batch[\"path\"][0]\n if any([filename != f for f in batch[\"path\"]]):\n raise ValueError(\"Filenames are not constant across the batch.\")\n return output\n\n def configure_optimizers(self) -> Any:\n optimizer = self.hparams.optimizer(params=self.parameters()) # type: ignore\n if self.hparams.scheduler is not None: # type: ignore\n scheduler = self.hparams.scheduler(optimizer=optimizer) # type: ignore\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": scheduler,\n \"monitor\": \"validate/loss\",\n \"interval\": \"epoch\",\n \"frequency\": self.trainer.check_val_every_n_epoch,\n },\n }\n return {\"optimizer\": optimizer}" }, { "identifier": "WSIMetricFactory", "path": "ahcore/metrics/metrics.py", "snippet": "class WSIMetricFactory:\n # TODO: this should be rewritten to actually be a factory\n def __init__(self, metrics: list[WSIMetric]) -> None:\n super().__init__()\n names = [metric.name for metric in metrics]\n if len(set(names)) != len(names):\n raise RuntimeError(\"Each individual metric must have a different name.\")\n\n self._metrics = metrics\n\n @classmethod\n def for_segmentation(cls, *args: Any, **kwargs: Any) -> WSIMetricFactory:\n dices = WSIDiceMetric(*args, **kwargs)\n return cls([dices])\n\n @classmethod\n def for_wsi_classification(cls, *args: Any, **kwargs: Any) -> WSIMetricFactory:\n raise NotImplementedError\n\n @classmethod\n def for_tile_classification(cls, roi_name: str, label: str, threshold: float) -> WSIMetricFactory:\n raise NotImplementedError\n\n def process_batch(\n self,\n predictions: torch.Tensor,\n target: torch.Tensor,\n wsi_name: str,\n roi: torch.Tensor | None,\n ) -> None:\n for metric in self._metrics:\n metric.process_batch(predictions, target, wsi_name=wsi_name, roi=roi)\n\n def get_average_score(\n self, precomputed_output: list[list[dict[str, dict[str, float]]]] | None = None\n ) -> dict[str, float]:\n output = {}\n for metric in self._metrics:\n output.update(metric.get_average_score(precomputed_output))\n return output\n\n def reset(self) -> None:\n for metric in self._metrics:\n metric.reset()\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(metrics={self._metrics})\"" }, { "identifier": "H5FileImageReader", "path": "ahcore/readers.py", "snippet": "class H5FileImageReader:\n def __init__(self, filename: Path, stitching_mode: StitchingMode) -> None:\n self._filename = filename\n self._stitching_mode = stitching_mode\n\n self.__empty_tile: GenericArray | None = None\n\n self._h5file: Optional[h5py.File] = None\n self._metadata = None\n self._mpp = None\n self._tile_size = None\n self._tile_overlap = None\n self._size = None\n self._num_channels = None\n self._dtype = None\n self._stride = None\n\n @classmethod\n def from_file_path(cls, filename: Path, stitching_mode: StitchingMode = StitchingMode.CROP) -> \"H5FileImageReader\":\n return cls(filename=filename, stitching_mode=stitching_mode)\n\n @property\n def size(self) -> tuple[int, int]:\n if not self._size:\n self._open_file()\n assert self._size\n return self._size\n\n @property\n def mpp(self) -> float:\n if not self._mpp:\n self._open_file()\n assert self._mpp\n return self._mpp\n\n def get_mpp(self, scaling: Optional[float]) -> float:\n if not self._mpp:\n self._open_file()\n assert self._mpp\n if scaling is None:\n return self.mpp\n\n return self._mpp / scaling\n\n def get_scaling(self, mpp: Optional[float]) -> float:\n \"\"\"Inverse of get_mpp().\"\"\"\n if not self._mpp:\n 
self._open_file()\n assert self._mpp\n if not mpp:\n return 1.0\n return self._mpp / mpp\n\n def _open_file(self) -> None:\n if not self._filename.is_file():\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(self._filename))\n\n try:\n self._h5file = h5py.File(self._filename, \"r\")\n except OSError as e:\n logger.error(f\"Could not open file {self._filename}: {e}\")\n raise e\n\n try:\n self._metadata = json.loads(self._h5file.attrs[\"metadata\"])\n except KeyError as e:\n logger.error(f\"Could not read metadata from file {self._filename}: {e}\")\n raise e\n\n if not self._metadata:\n raise ValueError(\"Metadata of h5 file is empty.\")\n\n self._mpp = self._metadata[\"mpp\"]\n self._tile_size = self._metadata[\"tile_size\"]\n self._tile_overlap = self._metadata[\"tile_overlap\"]\n self._size = self._metadata[\"size\"]\n self._num_channels = self._metadata[\"num_channels\"]\n self._dtype = self._metadata[\"dtype\"]\n self._precision = self._metadata[\"precision\"]\n self._multiplier = self._metadata[\"multiplier\"]\n self._stride = (\n self._tile_size[0] - self._tile_overlap[0],\n self._tile_size[1] - self._tile_overlap[1],\n )\n\n if self._metadata[\"has_color_profile\"]:\n _color_profile = self._h5file[\"color_profile\"][()].tobytes()\n raise NotImplementedError(f\"Color profiles are not yet implemented, and are present in {self._filename}.\")\n\n def __enter__(self) -> \"H5FileImageReader\":\n if self._h5file is None:\n self._open_file()\n return self\n\n def _empty_tile(self) -> GenericArray:\n if self.__empty_tile is not None:\n return self.__empty_tile\n\n # When this happens we would already be in the read_region, and self._num_channels would be populated.\n assert self._num_channels\n\n self.__empty_tile = np.zeros((self._num_channels, *self._tile_size), dtype=self._dtype)\n return self.__empty_tile\n\n def read_region(\n self,\n location: tuple[int, int],\n scaling: float,\n size: tuple[int, int],\n ) -> GenericArray:\n \"\"\"\n\n Parameters\n ----------\n location : tuple[int, int]\n Location from the top left (x, y) in pixel coordinates given at the requested scaling.\n scaling : float\n size : tuple[int, int]\n Size of the output region\n\n Returns\n -------\n np.ndarray\n The requested region.\n \"\"\"\n if scaling == 1.0:\n return self.read_region_raw(location, size)\n\n order = 1\n # Calculate original location and size considering the scaling\n\n # unpack for mypy\n l1, l2 = location\n s1, s2 = size\n\n original_location = (\n int(math.floor(l1 / scaling)) - order,\n int(math.floor(l2 / scaling)) - order,\n )\n original_size = (\n int(math.ceil(s1 / scaling)) + order,\n int(math.ceil(s2 / scaling)) + order,\n )\n\n raw_region = self.read_region_raw(original_location, original_size)\n\n # Determine the fractional start and end coordinates for mapping\n fractional_start = tuple(map(lambda _, ol: (_ / scaling) - ol + order, location, original_location))\n fractional_end = tuple(fs + size[i] / scaling for i, fs in enumerate(fractional_start))\n\n # Create an array of coordinates for map_coordinates\n # mypy doesn't properly understand yet that the complex type is valid\n coordinates = np.mgrid[\n fractional_start[0] : fractional_end[0] : complex(size[0]), # type: ignore\n fractional_start[1] : fractional_end[1] : complex(size[1]), # type: ignore\n ]\n coordinates = np.moveaxis(coordinates, 0, -1)\n\n # Interpolate using map_coordinates for all channels\n grid = np.mgrid[: raw_region.shape[0]]\n coordinates = np.concatenate([grid[:, None, None], 
coordinates], axis=0)\n # scipy doesn't have proper typing yet\n rescaled_region = cast(GenericArray, map_coordinates(raw_region, coordinates, order=order))\n\n return rescaled_region\n\n def read_region_raw(self, location: tuple[int, int], size: tuple[int, int]) -> GenericArray:\n \"\"\"\n Reads a region in the stored h5 file. This function stitches the regions as saved in the h5 file. Doing this\n it takes into account:\n 1) The region overlap, several region merging strategies are implemented: cropping, averaging across borders\n and taking the maximum across borders.\n 2) If tiles are saved or not. In case the tiles are skipped due to a background mask, an empty tile is returned.\n\n Parameters\n ----------\n location : tuple[int, int]\n Coordinates (x, y) of the upper left corner of the region.\n size : tuple[int, int]\n The (h, w) size of the extracted region.\n\n Returns\n -------\n np.ndarray\n Extracted region\n \"\"\"\n if self._h5file is None:\n self._open_file()\n assert self._h5file, \"File is not open. Should not happen\"\n assert self._tile_size\n assert self._tile_overlap\n\n image_dataset = self._h5file[\"data\"]\n num_tiles = self._metadata[\"num_tiles\"]\n tile_indices = self._h5file[\"tile_indices\"]\n\n total_rows = math.ceil((self._size[1] - self._tile_overlap[1]) / self._stride[1])\n total_cols = math.ceil((self._size[0] - self._tile_overlap[0]) / self._stride[0])\n\n assert total_rows * total_cols == num_tiles\n\n x, y = location\n w, h = size\n if x < 0 or y < 0 or x + w > self._size[0] or y + h > self._size[1]:\n logger.error(f\"Requested region is out of bounds: {location}, {self._size}\")\n raise ValueError(\"Requested region is out of bounds\")\n\n start_row = y // self._stride[1]\n end_row = min((y + h - 1) // self._stride[1] + 1, total_rows)\n start_col = x // self._stride[0]\n end_col = min((x + w - 1) // self._stride[0] + 1, total_cols)\n\n if self._stitching_mode == StitchingMode.AVERAGE:\n divisor_array = np.zeros((h, w), dtype=np.uint8)\n stitched_image = np.zeros((self._num_channels, h, w), dtype=self._dtype)\n for i in range(start_row, end_row):\n for j in range(start_col, end_col):\n tile_idx = (i * total_cols) + j\n # Map through tile indices\n tile_index_in_image_dataset = tile_indices[tile_idx]\n tile = (\n self._empty_tile()\n if tile_index_in_image_dataset == -1\n else image_dataset[tile_index_in_image_dataset]\n )\n start_y = i * self._stride[1] - y\n end_y = start_y + self._tile_size[1]\n start_x = j * self._stride[0] - x\n end_x = start_x + self._tile_size[0]\n\n img_start_y = max(0, start_y)\n img_end_y = min(h, end_y)\n img_start_x = max(0, start_x)\n img_end_x = min(w, end_x)\n\n if self._stitching_mode == StitchingMode.CROP:\n crop_start_y = img_start_y - start_y\n crop_end_y = img_end_y - start_y\n crop_start_x = img_start_x - start_x\n crop_end_x = img_end_x - start_x\n\n bbox = (crop_start_x, crop_start_y), (\n crop_end_x - crop_start_x,\n crop_end_y - crop_start_y,\n )\n cropped_tile = crop_to_bbox(tile, bbox)\n stitched_image[:, img_start_y:img_end_y, img_start_x:img_end_x] = cropped_tile\n\n elif self._stitching_mode == StitchingMode.AVERAGE:\n raise NotImplementedError\n tile_start_y = max(0, -start_y)\n tile_end_y = img_end_y - img_start_y\n tile_start_x = max(0, -start_x)\n tile_end_x = img_end_x - img_start_x\n\n # TODO: Replace this with crop_to_bbox\n cropped_tile = tile[tile_start_y:tile_end_y, tile_start_x:tile_end_x]\n stitched_image[img_start_y:img_end_y, img_start_x:img_end_x] += cropped_tile\n 
divisor_array[img_start_y:img_end_y, img_start_x:img_end_x] += 1\n else:\n raise ValueError(\"Unsupported stitching mode\")\n\n if self._stitching_mode == StitchingMode.AVERAGE:\n stitched_image = (stitched_image / divisor_array[..., np.newaxis]).astype(float)\n\n if self._precision != str(InferencePrecision.FP32):\n # Always convert to float32.\n stitched_image = stitched_image / self._multiplier\n stitched_image = stitched_image.astype(np.float32)\n\n return stitched_image\n\n def close(self) -> None:\n if self._h5file is not None:\n self._h5file.close() # Close the file in close\n del self._h5file # Reset the h5file attribute\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n self.close()\n return False" }, { "identifier": "StitchingMode", "path": "ahcore/readers.py", "snippet": "class StitchingMode(str, Enum):\n CROP = \"crop\"\n AVERAGE = \"average\"\n MAXIMUM = \"maximum\"" }, { "identifier": "_get_h5_output_filename", "path": "ahcore/utils/callbacks.py", "snippet": "def _get_h5_output_filename(dump_dir: Path, input_path: Path, model_name: str, step: None | int | str = None) -> Path:\n hex_dig = _get_uuid_for_filename(input_path=input_path)\n\n # Return the hashed filename with the new extension\n if step is not None:\n return dump_dir / \"outputs\" / model_name / f\"step_{step}\" / f\"{hex_dig}.h5\"\n return dump_dir / \"outputs\" / model_name / f\"{hex_dig}.h5\"" }, { "identifier": "_ValidationDataset", "path": "ahcore/utils/callbacks.py", "snippet": "class _ValidationDataset(Dataset[DlupDatasetSample]):\n \"\"\"Helper dataset to compute the validation metrics.\"\"\"\n\n def __init__(\n self,\n data_description: Optional[DataDescription],\n native_mpp: float,\n reader: H5FileImageReader,\n annotations: Optional[WsiAnnotations] = None,\n mask: Optional[WsiAnnotations] = None,\n region_size: tuple[int, int] = (1024, 1024),\n ):\n \"\"\"\n Parameters\n ----------\n data_description : DataDescription\n native_mpp : float\n The actual mpp of the underlying image.\n reader : H5FileImageReader\n annotations : WsiAnnotations\n mask : WsiAnnotations\n region_size : Tuple[int, int]\n The region size to use to split up the image into regions.\n \"\"\"\n super().__init__()\n self._data_description = data_description\n self._native_mpp = native_mpp\n self._scaling = self._native_mpp / reader.mpp\n self._reader = reader\n self._region_size = region_size\n self._logger = get_logger(type(self).__name__)\n\n self._annotations = self._validate_annotations(annotations)\n self._mask = self._validate_annotations(mask)\n\n self._grid = Grid.from_tiling(\n (0, 0),\n reader.size,\n tile_size=self._region_size,\n tile_overlap=(0, 0),\n mode=TilingMode.overflow,\n order=GridOrder.C,\n )\n\n self._regions = self._generate_regions()\n self._logger.debug(f\"Number of validation regions: {len(self._regions)}\")\n\n def _validate_annotations(self, annotations: Optional[WsiAnnotations]) -> Optional[WsiAnnotations]:\n if annotations is None:\n return None\n\n if isinstance(annotations, WsiAnnotations):\n if self._data_description is None:\n raise ValueError(\n \"Annotations as a `WsiAnnotations` class are provided but no data description is given.\"\n \"This is required to map the labels to indices.\"\n )\n elif isinstance(annotations, SlideImage):\n pass # We do not need a specific test for this\n else:\n raise NotImplementedError(f\"Annotations of type {type(annotations)} are not supported.\")\n\n 
return annotations\n\n def _generate_regions(self) -> list[tuple[int, int]]:\n \"\"\"Generate the regions to use. These regions are filtered grid cells where there is a mask.\n\n Returns\n -------\n List[Tuple[int, int]]\n The list of regions.\n \"\"\"\n regions = []\n for coordinates in self._grid:\n _coordinates = (coordinates[0], coordinates[1])\n if self._mask is None or self._is_masked(_coordinates):\n regions.append(_coordinates)\n return regions\n\n def _is_masked(self, coordinates: tuple[int, int]) -> bool:\n \"\"\"Check if the region is masked. This works with any masking function that supports a `read_region` method or\n returns a list of annotations with an `area` attribute. In case there are elements of the form `Point` in the\n annotation list, these are also added.\n\n Parameters\n ----------\n coordinates : Tuple[int, int]\n The coordinates of the region to check.\n\n Returns\n -------\n bool\n True if the region is masked, False otherwise. Will also return True when there is no mask.\n \"\"\"\n if self._mask is None:\n return True\n\n region_mask = self._mask.read_region(coordinates, self._scaling, self._region_size)\n\n if isinstance(region_mask, np.ndarray):\n return region_mask.sum() > 0\n\n # We check if the region is not a Point, otherwise this annotation is always included\n # Else, we compute if there is a positive area in the region.\n return bool(sum(_.area if _ is not isinstance(_, (Point, MultiPoint)) else 1.0 for _ in region_mask) > 0)\n\n def __getitem__(self, idx: int) -> dict[str, Any]:\n sample = {}\n coordinates = self._regions[idx]\n\n sample[\"prediction\"] = self._get_h5_region(coordinates)\n\n if self._annotations is not None:\n target, roi = self._get_annotation_data(coordinates)\n if roi is not None:\n sample[\"roi\"] = roi.astype(np.uint8)\n else:\n sample[\"roi\"] = None # type: ignore\n sample[\"target\"] = target\n\n return sample\n\n def _get_h5_region(self, coordinates: tuple[int, int]) -> npt.NDArray[np.uint8 | np.uint16 | np.float32 | np.bool_]:\n x, y = coordinates\n width, height = self._region_size\n\n if x + width > self._reader.size[0] or y + height > self._reader.size[1]:\n region = self._read_and_pad_region(coordinates)\n else:\n region = self._reader.read_region_raw(coordinates, self._region_size)\n return region\n\n def _read_and_pad_region(self, coordinates: tuple[int, int]) -> npt.NDArray[Any]:\n x, y = coordinates\n width, height = self._region_size\n new_width = min(width, self._reader.size[0] - x)\n new_height = min(height, self._reader.size[1] - y)\n clipped_region = self._reader.read_region_raw((x, y), (new_width, new_height))\n\n prediction = np.zeros((clipped_region.shape[0], *self._region_size), dtype=clipped_region.dtype)\n prediction[:, :new_height, :new_width] = clipped_region\n return prediction\n\n def _get_annotation_data(\n self, coordinates: tuple[int, int]\n ) -> tuple[npt.NDArray[np.float32], npt.NDArray[np.int_] | None]:\n if not self._annotations:\n raise ValueError(\"No annotations are provided.\")\n\n if not self._data_description:\n raise ValueError(\"No data description is provided.\")\n\n if not self._data_description.index_map:\n raise ValueError(\"Index map is not provided.\")\n\n _annotations = self._annotations.read_region(coordinates, self._scaling, self._region_size)\n\n if self._data_description.remap_labels:\n _annotations = rename_labels(_annotations, remap_labels=self._data_description.remap_labels)\n\n points, boxes, region, roi = convert_annotations(\n _annotations,\n self._region_size,\n 
index_map=self._data_description.index_map,\n roi_name=self._data_description.roi_name,\n )\n encoded_region = one_hot_encoding(index_map=self._data_description.index_map, mask=region)\n if roi is not None:\n return encoded_region, roi[np.newaxis, ...]\n return encoded_region, None\n\n def __iter__(self) -> Iterator[dict[str, Any]]:\n for idx in range(len(self)):\n yield self[idx]\n\n def __len__(self) -> int:\n return len(self._regions)" }, { "identifier": "DataDescription", "path": "ahcore/utils/data.py", "snippet": "class DataDescription(BaseModel):\n mask_label: Optional[str] = None\n mask_threshold: Optional[float] = None # This is only used for training\n roi_name: Optional[str] = None\n num_classes: PositiveInt\n data_dir: Path\n manifest_database_uri: str\n manifest_name: str\n split_version: str\n annotations_dir: Path\n training_grid: GridDescription\n inference_grid: GridDescription\n index_map: Optional[Dict[str, int]]\n remap_labels: Optional[Dict[str, str]] = None\n use_class_weights: Optional[bool] = False\n convert_mask_to_rois: bool = True\n use_roi: bool = True\n apply_color_profile: bool = True" }, { "identifier": "get_logger", "path": "ahcore/utils/io.py", "snippet": "def get_logger(name: str = __name__) -> logging.Logger:\n \"\"\"Initializes multi-GPU-friendly python command line logger.\"\"\"\n\n logger = logging.getLogger(name)\n\n # this ensures all logging levels get marked with the rank zero decorator\n # otherwise logs would get multiplied for each GPU process in multi-GPU setup\n for level in (\n \"debug\",\n \"info\",\n \"warning\",\n \"error\",\n \"exception\",\n \"fatal\",\n \"critical\",\n ):\n setattr(logger, level, rank_zero_only(getattr(logger, level)))\n\n return logger" }, { "identifier": "DataManager", "path": "ahcore/utils/manifest.py", "snippet": "class DataManager:\n def __init__(self, database_uri: str) -> None:\n self._database_uri = database_uri\n self.__session: Optional[Session] = None\n self._logger = get_logger(type(self).__name__)\n\n @property\n def _session(self) -> Session:\n if self.__session is None:\n self.__session = open_db(self._database_uri)\n return self.__session\n\n @staticmethod\n def _ensure_record(record: Any, description: str) -> None:\n \"\"\"Raises an error if the record is None.\"\"\"\n if not record:\n raise RecordNotFoundError(f\"{description} not found.\")\n\n def get_records_by_split(\n self,\n manifest_name: str,\n split_version: str,\n split_category: Optional[str] = None,\n ) -> Generator[Patient, None, None]:\n manifest = self._session.query(Manifest).filter_by(name=manifest_name).first()\n self._ensure_record(manifest, f\"Manifest with name {manifest_name}\")\n\n split_definition = self._session.query(SplitDefinitions).filter_by(version=split_version).first()\n self._ensure_record(split_definition, f\"Split definition with version {split_version}\")\n\n # This is because mypy is complaining otherwise,\n # but _ensure_record effectively ensures that the record is not None\n assert manifest is not None\n assert split_definition is not None\n query = (\n self._session.query(Patient)\n .join(Split)\n .filter(\n Patient.manifest_id == manifest.id,\n Split.split_definition_id == split_definition.id,\n )\n )\n\n if split_category is not None:\n split_category_key = get_enum_key_from_value(split_category, CategoryEnum)\n query = query.filter(Split.category == split_category_key)\n\n patients = query.all()\n\n self._logger.info(\n f\"Found {len(patients)} patients for split {split_category if split_category else 'all 
categories'}\"\n )\n for patient in patients:\n yield patient\n\n def get_image_metadata_by_split(\n self,\n manifest_name: str,\n split_version: str,\n split_category: Optional[str] = None,\n ) -> Generator[ImageMetadata, None, None]:\n \"\"\"\n Yields the metadata of images for a given manifest name, split version, and optional split category.\n\n Parameters\n ----------\n manifest_name : str\n The name of the manifest.\n split_version : str\n The version of the split.\n split_category : Optional[str], default=None\n The category of the split (e.g., \"fit\", \"validate\", \"test\").\n\n Yields\n -------\n ImageMetadata\n The metadata of the image.\n \"\"\"\n for patient in self.get_records_by_split(manifest_name, split_version, split_category):\n for image in patient.images:\n yield fetch_image_metadata(image)\n\n def get_image_metadata_by_patient(self, patient_code: str) -> list[ImageMetadata]:\n \"\"\"\n Fetch the metadata for the images associated with a specific patient.\n\n Parameters\n ----------\n patient_code : str\n The unique code of the patient.\n\n Returns\n -------\n list[ImageData]\n A list of metadata for all images associated with the patient.\n \"\"\"\n patient = self._session.query(Patient).filter_by(patient_code=patient_code).first()\n self._ensure_record(patient, f\"Patient with code {patient_code} not found\")\n assert patient is not None # for mypy\n return [fetch_image_metadata(image) for image in patient.images]\n\n def get_image_by_filename(self, filename: str) -> Image:\n \"\"\"\n Fetch the metadata for an image based on its filename.\n\n Parameters\n ----------\n filename : str\n The filename of the image.\n\n Returns\n -------\n Image\n The image from the database.\n \"\"\"\n image = self._session.query(Image).filter_by(filename=filename).first()\n self._ensure_record(image, f\"Image with filename {filename} not found\")\n assert image\n return image\n\n def get_image_metadata_by_id(self, image_id: int) -> ImageMetadata:\n \"\"\"\n Fetch the metadata for an image based on its ID.\n\n Parameters\n ----------\n image_id : int\n The ID of the image.\n\n Returns\n -------\n ImageMetadata\n Metadata of the image.\n \"\"\"\n image = self._session.query(Image).filter_by(id=image_id).first()\n self._ensure_record(image, f\"No image found with ID {image_id}\")\n assert image is not None # mypy\n return fetch_image_metadata(image)\n\n def __enter__(self) -> \"DataManager\":\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n if self._session is not None:\n self.close()\n return False\n\n def close(self) -> None:\n if self.__session is not None:\n self.__session.close()\n self.__session = None" }, { "identifier": "ImageMetadata", "path": "ahcore/utils/manifest.py", "snippet": "class ImageMetadata(BaseModel):\n \"\"\"Model to hold image metadata\"\"\"\n\n class Config:\n frozen = True\n\n filename: Path\n height: PositiveInt\n width: PositiveInt\n mpp: PositiveFloat" }, { "identifier": "fetch_image_metadata", "path": "ahcore/utils/manifest.py", "snippet": "def fetch_image_metadata(image: Image) -> ImageMetadata:\n \"\"\"Extract metadata from an Image object.\"\"\"\n return ImageMetadata(\n filename=Path(image.filename),\n height=int(image.height),\n width=int(image.width),\n mpp=float(image.mpp),\n )" }, { "identifier": "get_mask_and_annotations_from_record", "path": "ahcore/utils/manifest.py", "snippet": "def 
get_mask_and_annotations_from_record(\n annotations_root: Path, record: Image\n) -> tuple[_AnnotationReturnTypes | None, _AnnotationReturnTypes | None]:\n \"\"\"\n Get the mask and annotations from a record of type Image.\n\n Parameters\n ----------\n annotations_root : Path\n The root directory of the annotations.\n record : Type[Image]\n The record containing the mask and annotations.\n\n Returns\n -------\n tuple[WsiAnnotations, WsiAnnotations]\n The mask and annotations.\n \"\"\"\n _masks = parse_annotations_from_record(annotations_root, record.masks)\n _annotations = parse_annotations_from_record(annotations_root, record.annotations)\n return _masks, _annotations" } ]
import itertools import json import multiprocessing import time import pytorch_lightning as pl import torch from collections import namedtuple from multiprocessing.pool import Pool from pathlib import Path from typing import Any, Generator, Optional, cast from pytorch_lightning import Callback from ahcore.callbacks import WriteH5Callback from ahcore.lit_module import AhCoreLightningModule from ahcore.metrics import WSIMetricFactory from ahcore.readers import H5FileImageReader, StitchingMode from ahcore.utils.callbacks import _get_h5_output_filename, _ValidationDataset from ahcore.utils.data import DataDescription from ahcore.utils.io import get_logger from ahcore.utils.manifest import DataManager, ImageMetadata, fetch_image_metadata, get_mask_and_annotations_from_record
13,259
# Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename)) metadata = fetch_image_metadata(image) mask, annotations = get_mask_and_annotations_from_record(data_description.annotations_dir, image) return TaskData(filename, h5_filename, metadata, mask, annotations) def schedule_task( task_data: TaskData, pool: Pool, results_dict: dict[Any, str], # Any because it will be a multiprocessing.pool.AsyncResult class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> None: result = pool.apply_async( compute_metrics_for_case, args=(task_data, class_names, data_description, wsi_metrics, save_per_image), ) results_dict[result] = task_data.filename def compute_metrics_for_case( task_data: TaskData, class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> list[dict[str, Any]]: # Extract the data from the namedtuple filename, h5_filename, metadata, mask, annotations = task_data dump_list = [] logger.info("Computing metrics for %s", filename)
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None: if not isinstance(pl_module, AhCoreLightningModule): # TODO: Make a AhCoreCallback with these features raise ValueError("AhCoreLightningModule required for WriteTiffCallback.") self._model_name = pl_module.name _callback: Optional[WriteH5Callback] = None for idx, callback in enumerate(trainer.callbacks): # type: ignore if isinstance(callback, WriteH5Callback): _callback = cast(WriteH5Callback, trainer.callbacks[idx]) # type: ignore break if _callback is None: raise ValueError( "WriteH5Callback is not in the trainer's callbacks. " "This is required before WSI metrics can be computed using this Callback" ) self._dump_dir = _callback.dump_dir if pl_module.wsi_metrics is None: raise ValueError("WSI metrics are not set.") self._wsi_metrics = pl_module.wsi_metrics self._data_description = trainer.datamodule.data_description # type: ignore # For mypy assert self._data_description index_map = self._data_description.index_map assert index_map if not self._data_description: raise ValueError("Data description is not set.") self._class_names = dict([(v, k) for k, v in index_map.items()]) self._class_names[0] = "background" # Here we can query the database for the validation images self._data_manager: DataManager = trainer.datamodule.data_manager # type: ignore def _create_validate_image_metadata_gen( self, ) -> Generator[ImageMetadata, None, None]: assert self._data_description assert self._data_manager gen = self._data_manager.get_image_metadata_by_split( manifest_name=self._data_description.manifest_name, split_version=self._data_description.split_version, split_category="validate", ) for image_metadata in gen: yield image_metadata @property def _validate_metadata(self) -> Generator[ImageMetadata, None, None] | None: return self._validate_metadata_gen def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: self._validate_metadata_gen = self._create_validate_image_metadata_gen() def on_validation_batch_end( self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0, ) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") filenames = batch["path"] # Filenames are constant across the batch. if len(set(filenames)) != 1: raise ValueError( "All paths in a batch must be the same. " "Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler." 
) def compute_metrics( self, trainer: pl.Trainer, pl_module: pl.LightningModule ) -> list[list[dict[str, dict[str, float]]]]: assert self._dump_dir assert self._data_description assert self._validate_metadata assert self._data_manager metrics = [] with multiprocessing.Pool(processes=self._max_processes) as pool: results_to_filename: dict[list[dict[str, Any]], str] = {} completed_tasks = 0 # Fill up the initial task pool for image_metadata in itertools.islice(self._validate_metadata, self._max_processes): logger.info("Metadata: %s", image_metadata) # Assemble the task data # filename", "h5_filename", "metadata", "mask", "annotations" task_data = prepare_task_data( image_metadata.filename, self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) while results_to_filename: time.sleep(0.1) # Reduce excessive polling # Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? 
metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename)) metadata = fetch_image_metadata(image) mask, annotations = get_mask_and_annotations_from_record(data_description.annotations_dir, image) return TaskData(filename, h5_filename, metadata, mask, annotations) def schedule_task( task_data: TaskData, pool: Pool, results_dict: dict[Any, str], # Any because it will be a multiprocessing.pool.AsyncResult class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> None: result = pool.apply_async( compute_metrics_for_case, args=(task_data, class_names, data_description, wsi_metrics, save_per_image), ) results_dict[result] = task_data.filename def compute_metrics_for_case( task_data: TaskData, class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> list[dict[str, Any]]: # Extract the data from the namedtuple filename, h5_filename, metadata, mask, annotations = task_data dump_list = [] logger.info("Computing metrics for %s", filename)
with H5FileImageReader(h5_filename, stitching_mode=StitchingMode.CROP) as h5reader:
4
2023-10-14 18:04:12+00:00
16k
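The values above close out one sample: an import header, a token count (13,259), the truncated code window, the full source it was cut from, the ground-truth continuation line, the index of the supporting context snippet (4), a creation timestamp, and the context-length bucket (16k). As a minimal, hedged sketch of how such samples might be consumed, the Python below reads a JSON-lines export of records like this one and scores a single-line completer by exact match; the file name samples.jsonl, the field keys, and the complete_next_line callable are assumptions for illustration, not part of the dump itself.

import json
from typing import Callable, Iterable


def iter_records(path: str) -> Iterable[dict]:
    # Yield one sample per line from an assumed JSON-lines export of this dump.
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)


def exact_match_rate(path: str, complete_next_line: Callable[[str, str], str]) -> float:
    # Score a completion callable by exact match against each sample's ground-truth next line.
    # complete_next_line is a hypothetical stand-in mapping (imports, cropped code) -> one predicted line.
    hits = total = 0
    for record in iter_records(path):
        prediction = complete_next_line(record["import_statement"], record["cropped_code"])
        # Strip both sides so trailing whitespace does not register as a miss.
        hits += int(prediction.strip() == record["next_line"].strip())
        total += 1
    return hits / total if total else 0.0


if __name__ == "__main__":
    # Trivial baseline completer that always returns an empty line; swap in a real model call.
    print(exact_match_rate("samples.jsonl", lambda _imports, _code: ""))

Comparing stripped lines keeps trailing-whitespace differences from counting as misses; a stricter character-level or a looser token-level comparison could be substituted at that single line if preferred.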
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [\n p\n for p in init_signature.parameters.values()\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\n ]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\" % (cls, init_signature)\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key)\n if deep and hasattr(value, \"get_params\") and not isinstance(value, type):\n deep_items = value.get_params().items()\n out.update((key + \"__\" + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\n parameters of the form ``<component>__<parameter>`` so that it's\n possible to update each component of a nested object.\n\n Parameters\n ----------\n **params : dict\n Estimator parameters.\n\n Returns\n -------\n self : estimator instance\n Estimator instance.\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\")\n if key not in valid_params:\n local_valid_params = self._get_param_names()\n raise ValueError(\n f\"Invalid parameter {key!r} for estimator {self}. \"\n f\"Valid parameters are: {local_valid_params!r}.\"\n )\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n # TODO(1.4): remove specific handling of \"base_estimator\".\n # The \"base_estimator\" key is special. It was deprecated and\n # renamed to \"estimator\" for several estimators. 
This means we\n # need to translate it here and set sub-parameters on \"estimator\",\n # but only if the user did not explicitly set a value for\n # \"base_estimator\".\n if (\n key == \"base_estimator\"\n and valid_params[key] == \"deprecated\"\n and self.__module__.startswith(\"sklearn.\")\n ):\n warnings.warn(\n (\n f\"Parameter 'base_estimator' of {self.__class__.__name__} is\"\n \" deprecated in favor of 'estimator'. See\"\n f\" {self.__class__.__name__}'s docstring for more details.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n key = \"estimator\"\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def __sklearn_clone__(self):\n return _clone_parametrized(self)\n\n def __repr__(self, N_CHAR_MAX=700):\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\n # characters to render. We pass it as an optional parameter to ease\n # the tests.\n\n from .utils._pprint import _EstimatorPrettyPrinter\n\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\n\n # use ellipsis for sequences with a lot of elements\n pp = _EstimatorPrettyPrinter(\n compact=True,\n indent=1,\n indent_at_name=True,\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,\n )\n\n repr_ = pp.pformat(self)\n\n # Use bruteforce ellipsis when there are a lot of non-blank characters\n n_nonblank = len(\"\".join(repr_.split()))\n if n_nonblank > N_CHAR_MAX:\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\n regex = r\"^(\\s*\\S){%d}\" % lim\n # The regex '^(\\s*\\S){%d}' % n\n # matches from the start of the string until the nth non-blank\n # character:\n # - ^ matches the start of string\n # - (pattern){n} matches n repetitions of pattern\n # - \\s*\\S matches a non-blank char following zero or more blanks\n left_lim = re.match(regex, repr_).end()\n right_lim = re.match(regex, repr_[::-1]).end()\n\n if \"\\n\" in repr_[left_lim:-right_lim]:\n # The left side and right side aren't on the same line.\n # To avoid weird cuts, e.g.:\n # categoric...ore',\n # we need to start the right side with an appropriate newline\n # character so that it renders properly as:\n # categoric...\n # handle_unknown='ignore',\n # so we add [^\\n]*\\n which matches until the next \\n\n regex += r\"[^\\n]*\\n\"\n right_lim = re.match(regex, repr_[::-1]).end()\n\n ellipsis = \"...\"\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\n # Only add ellipsis if it results in a shorter repr\n repr_ = repr_[:left_lim] + \"...\" + repr_[-right_lim:]\n\n return repr_\n\n def __getstate__(self):\n if getattr(self, \"__slots__\", None):\n raise TypeError(\n \"You cannot use `__slots__` in objects inheriting from \"\n \"`sklearn.base.BaseEstimator`.\"\n )\n\n try:\n state = super().__getstate__()\n if state is None:\n # For Python 3.11+, empty instance (no `__slots__`,\n # and `__dict__`) will return a state equal to `None`.\n state = self.__dict__.copy()\n except AttributeError:\n # Python < 3.11\n state = self.__dict__.copy()\n\n if type(self).__module__.startswith(\"sklearn.\"):\n return dict(state.items(), _sklearn_version=__version__)\n else:\n return state\n\n def __setstate__(self, state):\n if type(self).__module__.startswith(\"sklearn.\"):\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\n if pickle_version != __version__:\n warnings.warn(\n InconsistentVersionWarning(\n estimator_name=self.__class__.__name__,\n current_sklearn_version=__version__,\n original_sklearn_version=pickle_version,\n ),\n )\n try:\n super().__setstate__(state)\n except AttributeError:\n 
self.__dict__.update(state)\n\n def _more_tags(self):\n return _DEFAULT_TAGS\n\n def _get_tags(self):\n collected_tags = {}\n for base_class in reversed(inspect.getmro(self.__class__)):\n if hasattr(base_class, \"_more_tags\"):\n # need the if because mixins might not have _more_tags\n # but might do redundant work in estimators\n # (i.e. calling more tags on BaseEstimator multiple times)\n more_tags = base_class._more_tags(self)\n collected_tags.update(more_tags)\n return collected_tags\n\n def _check_n_features(self, X, reset):\n \"\"\"Set the `n_features_in_` attribute, or check against it.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n reset : bool\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\n If False and the attribute exists, then check that it is equal to\n `X.shape[1]`. If False and the attribute does *not* exist, then\n the check is skipped.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n \"\"\"\n try:\n n_features = _num_features(X)\n except TypeError as e:\n if not reset and hasattr(self, \"n_features_in_\"):\n raise ValueError(\n \"X does not contain any features, but \"\n f\"{self.__class__.__name__} is expecting \"\n f\"{self.n_features_in_} features\"\n ) from e\n # If the number of features is not defined and reset=True,\n # then we skip this check\n return\n\n if reset:\n self.n_features_in_ = n_features\n return\n\n if not hasattr(self, \"n_features_in_\"):\n # Skip this check if the expected number of expected input features\n # was not recorded by calling fit first. This is typically the case\n # for stateless transformers.\n return\n\n if n_features != self.n_features_in_:\n raise ValueError(\n f\"X has {n_features} features, but {self.__class__.__name__} \"\n f\"is expecting {self.n_features_in_} features as input.\"\n )\n\n def _check_feature_names(self, X, *, reset):\n \"\"\"Set or check the `feature_names_in_` attribute.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n X : {ndarray, dataframe} of shape (n_samples, n_features)\n The input samples.\n\n reset : bool\n Whether to reset the `feature_names_in_` attribute.\n If False, the input will be checked for consistency with\n feature names of data provided when reset was last True.\n .. note::\n It is recommended to call `reset=True` in `fit` and in the first\n call to `partial_fit`. 
All other methods that validate `X`\n should set `reset=False`.\n \"\"\"\n\n if reset:\n feature_names_in = _get_feature_names(X)\n if feature_names_in is not None:\n self.feature_names_in_ = feature_names_in\n elif hasattr(self, \"feature_names_in_\"):\n # Delete the attribute when the estimator is fitted on a new dataset\n # that has no feature names.\n delattr(self, \"feature_names_in_\")\n return\n\n fitted_feature_names = getattr(self, \"feature_names_in_\", None)\n X_feature_names = _get_feature_names(X)\n\n if fitted_feature_names is None and X_feature_names is None:\n # no feature names seen in fit and in X\n return\n\n if X_feature_names is not None and fitted_feature_names is None:\n warnings.warn(\n f\"X has feature names, but {self.__class__.__name__} was fitted without\"\n \" feature names\"\n )\n return\n\n if X_feature_names is None and fitted_feature_names is not None:\n warnings.warn(\n \"X does not have valid feature names, but\"\n f\" {self.__class__.__name__} was fitted with feature names\"\n )\n return\n\n # validate the feature names against the `feature_names_in_` attribute\n if len(fitted_feature_names) != len(X_feature_names) or np.any(\n fitted_feature_names != X_feature_names\n ):\n message = (\n \"The feature names should match those that were passed during fit.\\n\"\n )\n fitted_feature_names_set = set(fitted_feature_names)\n X_feature_names_set = set(X_feature_names)\n\n unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)\n missing_names = sorted(fitted_feature_names_set - X_feature_names_set)\n\n def add_names(names):\n output = \"\"\n max_n_names = 5\n for i, name in enumerate(names):\n if i >= max_n_names:\n output += \"- ...\\n\"\n break\n output += f\"- {name}\\n\"\n return output\n\n if unexpected_names:\n message += \"Feature names unseen at fit time:\\n\"\n message += add_names(unexpected_names)\n\n if missing_names:\n message += \"Feature names seen at fit time, yet now missing:\\n\"\n message += add_names(missing_names)\n\n if not missing_names and not unexpected_names:\n message += (\n \"Feature names must be in the same order as they were in fit.\\n\"\n )\n\n raise ValueError(message)\n\n def _validate_data(\n self,\n X=\"no_validation\",\n y=\"no_validation\",\n reset=True,\n validate_separately=False,\n cast_to_ndarray=True,\n **check_params,\n ):\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features), default='no validation'\n The input samples.\n If `'no_validation'`, no validation is performed on `X`. This is\n useful for meta-estimator which can delegate input validation to\n their underlying estimator(s). In that case `y` must be passed and\n the only accepted `check_params` are `multi_output` and\n `y_numeric`.\n\n y : array-like of shape (n_samples,), default='no_validation'\n The targets.\n\n - If `None`, `check_array` is called on `X`. If the estimator's\n requires_y tag is True, then an error will be raised.\n - If `'no_validation'`, `check_array` is called on `X` and the\n estimator's requires_y tag is ignored. This is a default\n placeholder and is never meant to be explicitly set. 
In that case\n `X` must be passed.\n - Otherwise, only `y` with `_check_y` or both `X` and `y` are\n checked with either `check_array` or `check_X_y` depending on\n `validate_separately`.\n\n reset : bool, default=True\n Whether to reset the `n_features_in_` attribute.\n If False, the input will be checked for consistency with data\n provided when reset was last True.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n\n validate_separately : False or tuple of dicts, default=False\n Only used if y is not None.\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\n to be used for calling check_array() on X and y respectively.\n\n `estimator=self` is automatically added to these dicts to generate\n more informative error message in case of invalid input data.\n\n cast_to_ndarray : bool, default=True\n Cast `X` and `y` to ndarray with checks in `check_params`. If\n `False`, `X` and `y` are unchanged and only `feature_names_in_` and\n `n_features_in_` are checked.\n\n **check_params : kwargs\n Parameters passed to :func:`sklearn.utils.check_array` or\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\n is not False.\n\n `estimator=self` is automatically added to these params to generate\n more informative error message in case of invalid input data.\n\n Returns\n -------\n out : {ndarray, sparse matrix} or tuple of these\n The validated input. A tuple is returned if both `X` and `y` are\n validated.\n \"\"\"\n self._check_feature_names(X, reset=reset)\n\n if y is None and self._get_tags()[\"requires_y\"]:\n raise ValueError(\n f\"This {self.__class__.__name__} estimator \"\n \"requires y to be passed, but the target y is None.\"\n )\n\n no_val_X = isinstance(X, str) and X == \"no_validation\"\n no_val_y = y is None or isinstance(y, str) and y == \"no_validation\"\n\n if no_val_X and no_val_y:\n raise ValueError(\"Validation should be done on X, y or both.\")\n\n default_check_params = {\"estimator\": self}\n check_params = {**default_check_params, **check_params}\n\n if not cast_to_ndarray:\n if not no_val_X and no_val_y:\n out = X\n elif no_val_X and not no_val_y:\n out = y\n else:\n out = X, y\n elif not no_val_X and no_val_y:\n out = check_array(X, input_name=\"X\", **check_params)\n elif no_val_X and not no_val_y:\n out = _check_y(y, **check_params)\n else:\n if validate_separately:\n # We need this because some estimators validate X and y\n # separately, and in general, separately calling check_array()\n # on X and y isn't equivalent to just calling check_X_y()\n # :(\n check_X_params, check_y_params = validate_separately\n if \"estimator\" not in check_X_params:\n check_X_params = {**default_check_params, **check_X_params}\n X = check_array(X, input_name=\"X\", **check_X_params)\n if \"estimator\" not in check_y_params:\n check_y_params = {**default_check_params, **check_y_params}\n y = check_array(y, input_name=\"y\", **check_y_params)\n else:\n X, y = check_X_y(X, y, **check_params)\n out = X, y\n\n if not no_val_X and check_params.get(\"ensure_2d\", True):\n self._check_n_features(X, reset=reset)\n\n return out\n\n def _validate_params(self):\n \"\"\"Validate types and values of constructor parameters\n\n The expected type and values must be defined in the `_parameter_constraints`\n class attribute, which is a dictionary `param_name: list of constraints`. 
See\n the docstring of `validate_parameter_constraints` for a description of the\n accepted constraints.\n \"\"\"\n validate_parameter_constraints(\n self._parameter_constraints,\n self.get_params(deep=False),\n caller_name=self.__class__.__name__,\n )\n\n @property\n def _repr_html_(self):\n \"\"\"HTML representation of estimator.\n\n This is redundant with the logic of `_repr_mimebundle_`. The latter\n should be favorted in the long term, `_repr_html_` is only\n implemented for consumers who do not interpret `_repr_mimbundle_`.\n \"\"\"\n if get_config()[\"display\"] != \"diagram\":\n raise AttributeError(\n \"_repr_html_ is only defined when the \"\n \"'display' configuration option is set to \"\n \"'diagram'\"\n )\n return self._repr_html_inner\n\n def _repr_html_inner(self):\n \"\"\"This function is returned by the @property `_repr_html_` to make\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\n on `get_config()[\"display\"]`.\n \"\"\"\n return estimator_html_repr(self)\n\n def _repr_mimebundle_(self, **kwargs):\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\n output = {\"text/plain\": repr(self)}\n if get_config()[\"display\"] == \"diagram\":\n output[\"text/html\"] = estimator_html_repr(self)\n return output" }, { "identifier": "ClusterMixin", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class ClusterMixin:\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\n\n _estimator_type = \"clusterer\"\n\n def fit_predict(self, X, y=None):\n \"\"\"\n Perform clustering on `X` and returns cluster labels.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,), dtype=np.int64\n Cluster labels.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n self.fit(X)\n return self.labels_\n\n def _more_tags(self):\n return {\"preserves_dtype\": []}" }, { "identifier": "_fit_context", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "def _fit_context(*, prefer_skip_nested_validation):\n \"\"\"Decorator to run the fit methods of estimators within context managers.\n\n Parameters\n ----------\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called during fit will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most estimators, except for those that receive\n non-validated objects as parameters, such as meta-estimators that are given\n estimator objects.\n\n Returns\n -------\n decorated_fit : method\n The decorated fit method.\n \"\"\"\n\n def decorator(fit_method):\n @functools.wraps(fit_method)\n def wrapper(estimator, *args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n\n # we don't want to validate again for each call to partial_fit\n partial_fit_and_fitted = (\n fit_method.__name__ == \"partial_fit\" and _is_fitted(estimator)\n )\n\n if not global_skip_validation and not partial_fit_and_fitted:\n estimator._validate_params()\n\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return fit_method(estimator, *args, **kwargs)\n\n return wrapper\n\n return decorator" }, { "identifier": "_VALID_METRICS", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/metrics/pairwise.py", "snippet": "_VALID_METRICS = [\n \"euclidean\",\n \"l2\",\n \"l1\",\n \"manhattan\",\n \"cityblock\",\n \"braycurtis\",\n \"canberra\",\n \"chebyshev\",\n \"correlation\",\n \"cosine\",\n \"dice\",\n \"hamming\",\n \"jaccard\",\n \"mahalanobis\",\n \"matching\",\n \"minkowski\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"seuclidean\",\n \"sokalmichener\",\n \"sokalsneath\",\n \"sqeuclidean\",\n \"yule\",\n \"wminkowski\",\n \"nan_euclidean\",\n \"haversine\",\n]" }, { "identifier": "NearestNeighbors", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py", "snippet": "class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):\n \"\"\"Unsupervised learner for implementing neighbor searches.\n\n Read more in the :ref:`User Guide <unsupervised_neighbors>`.\n\n .. versionadded:: 0.9\n\n Parameters\n ----------\n n_neighbors : int, default=5\n Number of neighbors to use by default for :meth:`kneighbors` queries.\n\n radius : float, default=1.0\n Range of parameter space to use by default for :meth:`radius_neighbors`\n queries.\n\n algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'\n Algorithm used to compute the nearest neighbors:\n\n - 'ball_tree' will use :class:`BallTree`\n - 'kd_tree' will use :class:`KDTree`\n - 'brute' will use a brute-force search.\n - 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\n Note: fitting on sparse input will override the setting of\n this parameter, using brute force.\n\n leaf_size : int, default=30\n Leaf size passed to BallTree or KDTree. This can affect the\n speed of the construction and query, as well as the memory\n required to store the tree. The optimal value depends on the\n nature of the problem.\n\n metric : str or callable, default='minkowski'\n Metric to use for distance computation. Default is \"minkowski\", which\n results in the standard Euclidean distance when p = 2. See the\n documentation of `scipy.spatial.distance\n <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and\n the metrics listed in\n :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric\n values.\n\n If metric is \"precomputed\", X is assumed to be a distance matrix and\n must be square during fit. 
X may be a :term:`sparse graph`, in which\n case only \"nonzero\" elements may be considered neighbors.\n\n If metric is a callable function, it takes two arrays representing 1D\n vectors as inputs and must return one value indicating the distance\n between those vectors. This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n p : float, default=2\n Parameter for the Minkowski metric from\n sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, default=None\n Additional keyword arguments for the metric function.\n\n n_jobs : int, default=None\n The number of parallel jobs to run for neighbors search.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n Attributes\n ----------\n effective_metric_ : str\n Metric used to compute distances to neighbors.\n\n effective_metric_params_ : dict\n Parameters for the metric used to compute distances to neighbors.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_samples_fit_ : int\n Number of samples in the fitted data.\n\n See Also\n --------\n KNeighborsClassifier : Classifier implementing the k-nearest neighbors\n vote.\n RadiusNeighborsClassifier : Classifier implementing a vote among neighbors\n within a given radius.\n KNeighborsRegressor : Regression based on k-nearest neighbors.\n RadiusNeighborsRegressor : Regression based on neighbors within a fixed\n radius.\n BallTree : Space partitioning data structure for organizing points in a\n multi-dimensional space, used for nearest neighbor search.\n\n Notes\n -----\n See :ref:`Nearest Neighbors <neighbors>` in the online documentation\n for a discussion of the choice of ``algorithm`` and ``leaf_size``.\n\n https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.neighbors import NearestNeighbors\n >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]\n >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)\n >>> neigh.fit(samples)\n NearestNeighbors(...)\n >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)\n array([[2, 0]]...)\n >>> nbrs = neigh.radius_neighbors(\n ... [[0, 0, 1.3]], 0.4, return_distance=False\n ... 
)\n >>> np.asarray(nbrs[0][0])\n array(2)\n \"\"\"\n\n def __init__(\n self,\n *,\n n_neighbors=5,\n radius=1.0,\n algorithm=\"auto\",\n leaf_size=30,\n metric=\"minkowski\",\n p=2,\n metric_params=None,\n n_jobs=None,\n ):\n super().__init__(\n n_neighbors=n_neighbors,\n radius=radius,\n algorithm=algorithm,\n leaf_size=leaf_size,\n metric=metric,\n p=p,\n metric_params=metric_params,\n n_jobs=n_jobs,\n )\n\n @_fit_context(\n # NearestNeighbors.metric is not validated yet\n prefer_skip_nested_validation=False\n )\n def fit(self, X, y=None):\n \"\"\"Fit the nearest neighbors estimator from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or \\\n (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : NearestNeighbors\n The fitted nearest neighbors estimator.\n \"\"\"\n return self._fit(X)" }, { "identifier": "Interval", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Interval(_Constraint):\n \"\"\"Constraint representing a typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real, RealNotInt}\n The set of numbers in which to set the interval.\n\n If RealNotInt, only reals that don't have the integer type\n are allowed. For example 1.0 is allowed but 1 is not.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -∞.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +∞.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +∞) U {+∞}`.\n \"\"\"\n\n def __init__(self, type, left, right, *, closed):\n super().__init__()\n self.type = type\n self.left = left\n self.right = right\n self.closed = closed\n\n self._check_params()\n\n def _check_params(self):\n if self.type not in (Integral, Real, RealNotInt):\n raise ValueError(\n \"type must be either numbers.Integral, numbers.Real or RealNotInt.\"\n f\" Got {self.type} instead.\"\n )\n\n if self.closed not in (\"left\", \"right\", \"both\", \"neither\"):\n raise ValueError(\n \"closed must be either 'left', 'right', 'both' or 'neither'. 
\"\n f\"Got {self.closed} instead.\"\n )\n\n if self.type is Integral:\n suffix = \"for an interval over the integers.\"\n if self.left is not None and not isinstance(self.left, Integral):\n raise TypeError(f\"Expecting left to be an int {suffix}\")\n if self.right is not None and not isinstance(self.right, Integral):\n raise TypeError(f\"Expecting right to be an int {suffix}\")\n if self.left is None and self.closed in (\"left\", \"both\"):\n raise ValueError(\n f\"left can't be None when closed == {self.closed} {suffix}\"\n )\n if self.right is None and self.closed in (\"right\", \"both\"):\n raise ValueError(\n f\"right can't be None when closed == {self.closed} {suffix}\"\n )\n else:\n if self.left is not None and not isinstance(self.left, Real):\n raise TypeError(\"Expecting left to be a real number.\")\n if self.right is not None and not isinstance(self.right, Real):\n raise TypeError(\"Expecting right to be a real number.\")\n\n if self.right is not None and self.left is not None and self.right <= self.left:\n raise ValueError(\n f\"right can't be less than left. Got left={self.left} and \"\n f\"right={self.right}\"\n )\n\n def __contains__(self, val):\n if np.isnan(val):\n return False\n\n left_cmp = operator.lt if self.closed in (\"left\", \"both\") else operator.le\n right_cmp = operator.gt if self.closed in (\"right\", \"both\") else operator.ge\n\n left = -np.inf if self.left is None else self.left\n right = np.inf if self.right is None else self.right\n\n if left_cmp(val, left):\n return False\n if right_cmp(val, right):\n return False\n return True\n\n def is_satisfied_by(self, val):\n if not isinstance(val, self.type):\n return False\n\n return val in self\n\n def __str__(self):\n type_str = \"an int\" if self.type is Integral else \"a float\"\n left_bracket = \"[\" if self.closed in (\"left\", \"both\") else \"(\"\n left_bound = \"-inf\" if self.left is None else self.left\n right_bound = \"inf\" if self.right is None else self.right\n right_bracket = \"]\" if self.closed in (\"right\", \"both\") else \")\"\n\n # better repr if the bounds were given as integers\n if not self.type == Integral and isinstance(self.left, Real):\n left_bound = float(left_bound)\n if not self.type == Integral and isinstance(self.right, Real):\n right_bound = float(right_bound)\n\n return (\n f\"{type_str} in the range \"\n f\"{left_bracket}{left_bound}, {right_bound}{right_bracket}\"\n )" }, { "identifier": "StrOptions", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class StrOptions(Options):\n \"\"\"Constraint representing a finite set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the string\n representation of the constraint.\n \"\"\"\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)" }, { "identifier": "_check_sample_weight", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_sample_weight(\n sample_weight, X, dtype=None, copy=False, only_non_negative=False\n):\n \"\"\"Validate sample weights.\n\n Note that passing sample_weight=None will output an array of ones.\n Therefore, in some cases, you may want to protect the call with:\n if sample_weight is not None:\n sample_weight = _check_sample_weight(...)\n\n Parameters\n ----------\n sample_weight : {ndarray, Number or None}, 
shape (n_samples,)\n Input sample weights.\n\n X : {ndarray, list, sparse matrix}\n Input data.\n\n only_non_negative : bool, default=False,\n Whether or not the weights are expected to be non-negative.\n\n .. versionadded:: 1.0\n\n dtype : dtype, default=None\n dtype of the validated `sample_weight`.\n If None, and the input `sample_weight` is an array, the dtype of the\n input is preserved; otherwise an array with the default numpy dtype\n is be allocated. If `dtype` is not one of `float32`, `float64`,\n `None`, the output will be of dtype `float64`.\n\n copy : bool, default=False\n If True, a copy of sample_weight will be created.\n\n Returns\n -------\n sample_weight : ndarray of shape (n_samples,)\n Validated sample weight. It is guaranteed to be \"C\" contiguous.\n \"\"\"\n n_samples = _num_samples(X)\n\n if dtype is not None and dtype not in [np.float32, np.float64]:\n dtype = np.float64\n\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=dtype)\n elif isinstance(sample_weight, numbers.Number):\n sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n else:\n if dtype is None:\n dtype = [np.float64, np.float32]\n sample_weight = check_array(\n sample_weight,\n accept_sparse=False,\n ensure_2d=False,\n dtype=dtype,\n order=\"C\",\n copy=copy,\n input_name=\"sample_weight\",\n )\n if sample_weight.ndim != 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if sample_weight.shape != (n_samples,):\n raise ValueError(\n \"sample_weight.shape == {}, expected {}!\".format(\n sample_weight.shape, (n_samples,)\n )\n )\n\n if only_non_negative:\n check_non_negative(sample_weight, \"`sample_weight`\")\n\n return sample_weight" } ]
import warnings import numpy as np from numbers import Integral, Real from scipy import sparse from ..base import BaseEstimator, ClusterMixin, _fit_context from ..metrics.pairwise import _VALID_METRICS from ..neighbors import NearestNeighbors from ..utils._param_validation import Interval, StrOptions from ..utils.validation import _check_sample_weight from ._dbscan_inner import dbscan_inner
12,551
metric : str, or callable, default='euclidean' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a :term:`sparse graph`, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=None The power of the Minkowski metric to be used to calculate distance between points. If None, then ``p=2`` (equivalent to the Euclidean distance). n_jobs : int, default=None The number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- core_sample_indices_ : ndarray of shape (n_core_samples,) Indices of core samples. components_ : ndarray of shape (n_core_samples, n_features) Copy of each core sample found by training. labels_ : ndarray of shape (n_samples) Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- OPTICS : A similar clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 
1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. Examples -------- >>> from sklearn.cluster import DBSCAN >>> import numpy as np >>> X = np.array([[1, 2], [2, 2], [2, 3], ... [8, 7], [8, 8], [25, 80]]) >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X) >>> clustering.labels_ array([ 0, 0, 0, 1, 1, -1]) >>> clustering DBSCAN(eps=3, min_samples=2) """ _parameter_constraints: dict = { "eps": [Interval(Real, 0.0, None, closed="neither")], "min_samples": [Interval(Integral, 1, None, closed="left")], "metric": [
""" DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <[email protected]> # Joel Nothman <[email protected]> # Lars Buitinck # # License: BSD 3 clause def dbscan( X, eps=0.5, *, min_samples=5, metric="minkowski", metric_params=None, algorithm="auto", leaf_size=30, p=2, sample_weight=None, n_jobs=None, ): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : str or callable, default='minkowski' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph <sparse graph>`, in which case only "nonzero" elements may be considered neighbors. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array-like of shape (n_samples,), default=None Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. If precomputed distance are used, parallel execution is not available and thus n_jobs will have no effect. Returns ------- core_samples : ndarray of shape (n_core_samples,) Indices of core samples. labels : ndarray of shape (n_samples,) Cluster labels for each point. Noisy samples are given the label -1. See Also -------- DBSCAN : An estimator interface for this clustering algorithm. OPTICS : A similar estimator interface clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. 
This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. """ est = DBSCAN( eps=eps, min_samples=min_samples, metric=metric, metric_params=metric_params, algorithm=algorithm, leaf_size=leaf_size, p=p, n_jobs=n_jobs, ) est.fit(X, sample_weight=sample_weight) return est.core_sample_indices_, est.labels_ class DBSCAN(ClusterMixin, BaseEstimator): """Perform DBSCAN clustering from vector array or distance matrix. DBSCAN - Density-Based Spatial Clustering of Applications with Noise. Finds core samples of high density and expands clusters from them. Good for data which contains clusters of similar density. The worst case memory complexity of DBSCAN is :math:`O({n}^2)`, which can occur when the `eps` param is large and `min_samples` is low. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. If `min_samples` is set to a higher value, DBSCAN will find denser clusters, whereas if it is set to a lower value, the found clusters will be more sparse. metric : str, or callable, default='euclidean' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a :term:`sparse graph`, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. metric_params : dict, default=None Additional keyword arguments for the metric function. .. 
versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=None The power of the Minkowski metric to be used to calculate distance between points. If None, then ``p=2`` (equivalent to the Euclidean distance). n_jobs : int, default=None The number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- core_sample_indices_ : ndarray of shape (n_core_samples,) Indices of core samples. components_ : ndarray of shape (n_core_samples, n_features) Copy of each core sample found by training. labels_ : ndarray of shape (n_samples) Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- OPTICS : A similar clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. Examples -------- >>> from sklearn.cluster import DBSCAN >>> import numpy as np >>> X = np.array([[1, 2], [2, 2], [2, 3], ... 
[8, 7], [8, 8], [25, 80]]) >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X) >>> clustering.labels_ array([ 0, 0, 0, 1, 1, -1]) >>> clustering DBSCAN(eps=3, min_samples=2) """ _parameter_constraints: dict = { "eps": [Interval(Real, 0.0, None, closed="neither")], "min_samples": [Interval(Integral, 1, None, closed="left")], "metric": [
next_line: StrOptions(set(_VALID_METRICS) | {"precomputed"}),
gold_snippet_index: 3
created_at: 2023-10-07 13:19:48+00:00
level: 16k
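The Notes in the DBSCAN record above suggest avoiding the bulk neighborhood-query memory cost by pre-computing sparse neighborhoods with NearestNeighbors.radius_neighbors_graph using mode='distance', then passing metric='precomputed'. The following minimal sketch (not part of the dataset record) illustrates that workflow; it reuses the toy array and eps=3, min_samples=2 from the record's Examples section, which are illustrative values only.

# Sketch: pre-compute a sparse radius-neighborhood graph, then run DBSCAN on it
# with metric='precomputed', as described in the record's Notes section.
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]], dtype=float)
eps = 3.0

# Sparse CSR matrix holding only pairwise distances within `eps`.
neigh = NearestNeighbors(radius=eps).fit(X)
D = neigh.radius_neighbors_graph(X, mode="distance")

# DBSCAN treats the sparse graph as a precomputed distance matrix; entries
# absent from the graph are not considered neighbors.
labels = DBSCAN(eps=eps, min_samples=2, metric="precomputed").fit_predict(D)
print(labels)  # expected to match the dense run from the Examples: [ 0  0  0  1  1 -1]

The Notes also mention an alternative for data with many (near-)duplicate points: collapse the duplicates and pass their multiplicities through sample_weight instead of pre-computing distances.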
repo_name: hellloxiaotian/KDNet
file_path: test_ccpd.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n for w in weights if isinstance(weights, list) else [weights]:\n # attempt_download(w) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n \n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n \n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset" }, { "identifier": "coco80_to_coco91_class", "path": "utils/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x" }, { 
"identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(dict):\n # Download dataset if not found locally\n val, s = dict.get('val'), dict.get('download')\n if val and len(val):\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n print('\\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])\n if s and len(s): # download script\n print('Downloading %s ...' % s)\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n torch.hub.download_url_to_file(s, f)\n r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip\n else: # bash script\n r = os.system(s)\n print('Dataset autodownload %s\\n' % ('success' if r == 0 else 'failure')) # analyze return value\n else:\n raise Exception('Dataset not found.')" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file):\n # Search for file if not found\n if Path(file).is_file() or file == '':\n return file\n else:\n files = glob.glob('./**/' + file, recursive=True) # find file\n assert len(files), f'File Not Found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(img_size, s=32):\n # Verify img_size is a multiple of stride s\n new_size = make_divisible(img_size, int(s)) # ceil gs-multiple\n if new_size != img_size:\n print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))\n return new_size" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "def check_requirements(requirements='requirements.txt', exclude=()):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n import pkg_resources as pkg\n prefix = colorstr('red', 'bold', 'requirements:')\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n if not file.exists():\n print(f\"{prefix} {file.resolve()} not found, check failed.\")\n return\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n n += 1\n print(f\"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...\")\n print(subprocess.check_output(f\"pip install '{e.req}'\", shell=True).decode())\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s)) # emoji-safe" }, { "identifier": "box_iou", "path": "utils/general.py", "snippet": "def box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix 
containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1])\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=()):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n if nc == 1:\n x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5,\n # so there is no need to multiplicate.\n else:\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # 
iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "set_logging", "path": "utils/general.py", "snippet": "def set_logging(rank=-1):\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if rank in [-1, 0] else logging.WARN)" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=True, sep=''):\n # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.\n path = Path(path) # os-agnostic\n if (path.exists() and exist_ok) or (not path.exists()):\n return str(path)\n else:\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n return f\"{path}{sep}{n}\" # update path" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "ap_per_class", "path": "utils/metrics.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, v5_metric=False, plot=False, save_dir='.', names=()):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n nc = unique_classes.shape[0] # number of classes, number of detections\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = (target_cls == c).sum() # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + 1e-16) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j], v5_metric=v5_metric)\n if plot and j == 0:\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n if plot:\n plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)\n plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')\n plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')\n plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')\n\n i = f1.mean(0).argmax() # max F1 index\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of 
classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = general.box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[gc, detection_classes[m1[j]]] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def plot(self, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FP'] if labels else \"auto\",\n yticklabels=names + ['background FN'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n except Exception as e:\n pass\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i 
// ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)" }, { "identifier": "plot_study_txt", "path": "utils/plots.py", "snippet": "def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()\n # Plot study.txt generated by test.py\n fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)\n # ax = ax.ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]:\n for f in sorted(Path(path).glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']\n # for i in range(7):\n # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n # ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n 
ax2.set_ylim(30, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n plt.savefig(str(Path(path).name) + '.png', dpi=300)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_synchronized", "path": "utils/torch_utils.py", "snippet": "def time_synchronized():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "TracedModel", "path": "utils/torch_utils.py", "snippet": "class TracedModel(nn.Module):\n\n def __init__(self, model=None, device=None, img_size=(640,640)): \n super(TracedModel, self).__init__()\n \n print(\" Convert model to Traced-model... \") \n self.stride = model.stride\n self.names = model.names\n self.model = model\n\n self.model = revert_sync_batchnorm(self.model)\n self.model.to('cpu')\n self.model.eval()\n\n self.detect_layer = self.model.model[-1]\n self.model.traced = True\n \n rand_example = torch.rand(1, 3, img_size, img_size)\n \n traced_script_module = torch.jit.trace(self.model, rand_example, strict=False)\n #traced_script_module = torch.jit.script(self.model)\n traced_script_module.save(\"traced_model.pt\")\n print(\" traced_script_module saved! \")\n self.model = traced_script_module\n self.model.to(device)\n self.detect_layer.to(device)\n print(\" model is traced! \\n\") \n\n def forward(self, x, augment=False, profile=False):\n out = self.model(x)\n out = self.detect_layer(out)\n return out" } ]
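The context list above quotes the repository's box_iou helper, which returns an N x M matrix of pairwise IoU values for boxes given in (x1, y1, x2, y2) format. Below is a small illustrative usage sketch, not taken from the dataset: the box coordinates are made up, and it assumes the helper is importable from utils.general as in the record's import statement (i.e. that the script runs inside the repository).

# Sketch: pairwise IoU between 2 predicted boxes and 1 ground-truth box.
import torch
from utils.general import box_iou  # repo helper quoted in the context list above (assumed importable)

preds = torch.tensor([[0., 0., 10., 10.],
                      [5., 5., 15., 15.]])   # N = 2 predicted boxes, xyxy
labels = torch.tensor([[0., 0., 10., 10.]])  # M = 1 ground-truth box, xyxy

iou = box_iou(preds, labels)  # shape (2, 1)
print(iou)                    # approximately [[1.0000], [0.1429]]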
import_statement:
import argparse
import json
import os
import numpy as np
import torch
import yaml
from pathlib import Path
from threading import Thread
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
    box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized, TracedModel
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
token_num: 10,846
augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist():
def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist():
next_line: xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
gold_snippet_index: 10
created_at: 2023-10-08 13:05:58+00:00
level: 16k
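The gold completion (next_line) above converts a predicted box from native pixel coordinates to normalized YOLO-style xywh before it is written to a label file. The sketch below reproduces that single step outside the test loop; the xyxy2xywh body follows the torch-tensor path of the helper quoted in the record's context list, the normalization gain keeps the whwh layout noted in the record, and the image size and box values are illustrative assumptions.

# Sketch: xyxy (native pixels) -> normalized xywh, as in the gold completion line.
import torch

def xyxy2xywh(x):
    # Torch-tensor path of the helper quoted in the record's context list.
    y = x.clone()
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]        # width
    y[:, 3] = x[:, 3] - x[:, 1]        # height
    return y

h0, w0 = 480, 640                     # assumed native image height, width
gn = torch.tensor([w0, h0, w0, h0])   # normalization gain, whwh order
xyxy = [100.0, 120.0, 300.0, 360.0]   # one predicted box in native coords (illustrative)

xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
print(xywh)  # [0.3125, 0.5, 0.3125, 0.5]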
repo_name: Significant-Gravitas/autostandup
file_path: bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n \"\"\"\n Initializes the StreaksDB class and creates the 'streaks' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_streaks_table()\n\n def _create_streaks_table(self):\n \"\"\"\n Creates the 'streaks' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS streaks (\n discord_id BIGINT PRIMARY KEY,\n current_streak INT DEFAULT 0,\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :param new_streak: The new streak count.\n \"\"\"\n query = \"\"\"\n INSERT INTO streaks (discord_id, current_streak)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE current_streak = %s\n \"\"\"\n params = (discord_id, new_streak, new_streak)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :return: The current streak count.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n query = \"SELECT current_streak FROM streaks WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()" }, { "identifier": "TeamMemberDB", "path": "team_members/team_member_db.py", "snippet": "class TeamMemberDB(BaseDB):\n \"\"\"\n TeamMemberDB class handles operations related to the 'team_members' table.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the TeamMemberDB class and creates the 'team_members' table if it doesn't exist.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_team_members_table()\n\n def _create_team_members_table(self):\n \"\"\"\n Creates the 'team_members' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS team_members (\n discord_id BIGINT PRIMARY KEY,\n name VARCHAR(255) NOT NULL,\n time_zone VARCHAR(50) NOT NULL,\n github_username VARCHAR(255),\n on_vacation BOOLEAN DEFAULT FALSE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_new_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Inserts a new team member into the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param name: The name of the team member.\n :param time_zone: The time zone of the team member.\n :param github_username: The GitHub username of 
the team member.\n \"\"\"\n query = \"\"\"\n INSERT INTO team_members (discord_id, name, time_zone, github_username)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE name = %s, time_zone = %s, github_username = %s\n \"\"\"\n params = (discord_id, name, time_zone, github_username, name, time_zone, github_username)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Removes a team member from the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member to remove.\n \"\"\"\n query = \"DELETE FROM team_members WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def list_all_members(self) -> List[Tuple[int, str, str, str, bool]]:\n \"\"\"\n Fetches all team members from the 'team_members' table.\n\n :return: A list of tuples, each containing the Discord ID, name, time zone, GitHub username, and vacation status of a team member.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n try:\n c.execute(\"SELECT discord_id, name, time_zone, github_username, on_vacation FROM team_members\")\n return c.fetchall()\n finally:\n c.close()\n self.close()\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Updates the timezone of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param new_time_zone: The new timezone to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET time_zone = %s WHERE discord_id = %s\"\n params = (new_time_zone, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def set_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET on_vacation = %s WHERE discord_id = %s\"\n params = (on_vacation, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "UpdatesDB", "path": "updates/updates_db.py", "snippet": "class UpdatesDB(BaseDB):\n \"\"\"\n Database class for handling operations related to the 'updates' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the UpdatesDB class and creates the 'updates' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_updates_table()\n\n def _create_updates_table(self):\n \"\"\"\n Creates the 'updates' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS updates (\n id INT AUTO_INCREMENT PRIMARY KEY,\n discord_id BIGINT,\n status TEXT NOT NULL,\n summarized_status TEXT,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n time_zone VARCHAR(255),\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n )\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update into the 'updates' 
table.\n\n :param discord_id: The Discord ID of the team member.\n :param status: The status update.\n :param time_zone: The time zone of the user.\n \"\"\"\n # Convert current UTC time to user's local time zone\n utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)\n local_now = utc_now.astimezone(pytz.timezone(time_zone))\n\n query = \"INSERT INTO updates (discord_id, status, timestamp, time_zone) VALUES (%s, %s, %s, %s)\"\n params = (discord_id, status, local_now, time_zone)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized_status for the most recent update for a given user.\n\n :param discord_id: The Discord ID of the team member.\n :param summarized_status: The summarized status update.\n \"\"\"\n query = \"\"\"\n UPDATE updates\n SET summarized_status = %s\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (summarized_status, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n \n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n :param discord_id: The Discord ID of the user.\n :param time_zone: The time zone of the user.\n :return: The count of check-ins in the current week.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Adjusting the current time to the user's time zone\n local_tz = pytz.timezone(time_zone)\n local_now = datetime.now(local_tz)\n \n # Getting the Monday of the current week in the user's time zone\n monday = local_now - timedelta(days=local_now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n query = \"\"\"\n SELECT COUNT(*) FROM updates\n WHERE discord_id = %s AND timestamp >= %s\n \"\"\"\n params = (discord_id, monday)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()\n\n def get_statuses_in_date_range(self, discord_id: int, start_date: datetime, end_date: datetime) -> List[str]:\n \"\"\"\n Fetches all raw status updates for a given user within a specified date range.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n A list of raw status updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT summarized_status FROM updates\n WHERE discord_id = %s AND timestamp >= %s AND timestamp <= %s\n \"\"\"\n params = (discord_id, start_date, end_date)\n try:\n c.execute(query, params)\n \n statuses = [row[0] for row in c.fetchall()]\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor(dictionary=True) # Set dictionary=True to return results as dictionaries\n \n query = \"\"\"\n SELECT id, discord_id, status, summarized_status, timestamp \n FROM updates\n 
WHERE discord_id = %s\n ORDER BY timestamp DESC\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n statuses = c.fetchall()\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT timestamp, time_zone FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return (row[0], row[1]) if row else (None, None)\n finally:\n c.close()\n self.close()\n \n def delete_newest_status(self, discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Fetch the ID of the newest status update for the given user\n query_get_id = \"\"\"\n SELECT id FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n try:\n c.execute(query_get_id, (discord_id,))\n \n row = c.fetchone()\n if row:\n status_id = row[0]\n \n # Now, delete the status update using its ID\n query_delete = \"\"\"\n DELETE FROM updates WHERE id = %s\n \"\"\"\n c.execute(query_delete, (status_id,))\n \n self.conn.commit()\n finally:\n c.close()\n self.close()" }, { "identifier": "WeeklyPostsDB", "path": "weekly_posts/weekly_posts_db.py", "snippet": "class WeeklyPostsDB(BaseDB):\n \"\"\"\n Database class that handles operations related to the 'weekly_posts' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the WeeklyPostsDB class, connects to the MySQL database,\n and creates the 'weekly_posts' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_weekly_posts_table()\n\n def _create_weekly_posts_table(self):\n \"\"\"\n Creates the 'weekly_posts' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS weekly_posts (\n post_id BIGINT PRIMARY KEY,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def get_weekly_post_data(self) -> Optional[Dict[str, datetime.datetime]]:\n \"\"\"\n Fetches the most recent weekly post data from the 'weekly_posts' table.\n\n :return: A dictionary containing the post ID and timestamp, or None if no data exists.\n \"\"\"\n query = \"SELECT post_id, timestamp FROM weekly_posts ORDER BY timestamp DESC LIMIT 1\"\n \n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n try:\n c.execute(query)\n row = c.fetchone()\n\n if row:\n return {'post_id': row[0], 'timestamp': row[1]}\n return None\n finally:\n c.close()\n self.close()\n\n def save_weekly_post_data(self, post_id: int, timestamp: 
datetime.datetime):\n \"\"\"\n Inserts or updates the weekly post data in the 'weekly_posts' table.\n\n :param post_id: The ID of the weekly post.\n :param timestamp: The timestamp of the weekly post.\n \"\"\"\n query = \"\"\"\n INSERT INTO weekly_posts (post_id, timestamp)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE timestamp = %s\n \"\"\"\n params = (post_id, timestamp, timestamp)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "StreaksManager", "path": "streaks/streaks_manager.py", "snippet": "class StreaksManager:\n \"\"\"\n Manages the streaks for team members.\n \"\"\"\n \n def __init__(self, streaks_db: StreaksDB):\n \"\"\"\n Initializes a new StreaksManager instance.\n\n Args:\n streaks_db: The StreaksDB object that handles database operations.\n \"\"\"\n self.streaks_db = streaks_db\n \n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n The current streak count.\n \"\"\"\n return self.streaks_db.get_streak(discord_id)\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n new_streak: The new streak count.\n \"\"\"\n self.streaks_db.update_streak(discord_id, new_streak)\n \n def reset_streak(self, discord_id: int):\n \"\"\"\n Resets the streak for a given user to zero.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.streaks_db.update_streak(discord_id, 0)" }, { "identifier": "TeamMemberManager", "path": "team_members/team_member_manager.py", "snippet": "class TeamMemberManager:\n \"\"\"\n Manages operations related to team members.\n \"\"\"\n\n def __init__(self, db: TeamMemberDB):\n \"\"\"\n Initialize a TeamMemberManager object.\n\n :param db: TeamMemberDB object for interacting with the database.\n \"\"\"\n self.db = db\n self.team_members = self.load_team_members()\n\n def load_team_members(self) -> List[TeamMember]:\n \"\"\"\n Load team members from the MySQL database into a list of TeamMember objects.\n\n :return: List of TeamMember objects.\n \"\"\"\n team_members = []\n members_data = self.db.list_all_members()\n\n for member_data in members_data:\n member = TeamMember(\n discord_id=member_data[0],\n time_zone=member_data[2],\n name=member_data[1],\n github_username=member_data[3],\n on_vacation=member_data[4]\n )\n team_members.append(member)\n\n return team_members\n\n def find_member(self, discord_id: int) -> TeamMember:\n \"\"\"\n Find and return a team member by their Discord ID.\n\n :param discord_id: The Discord ID of the team member.\n :return: A TeamMember object if found, otherwise None.\n \"\"\"\n for member in self.team_members:\n if member.discord_id == discord_id:\n return member\n return None\n\n def add_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Add a new team member to the list and the database.\n\n :param discord_id: The Discord ID of the new member.\n :param name: The name of the new member.\n :param time_zone: The time zone of the new member.\n :param github_username: The GitHub username of the new member.\n \"\"\"\n new_member = TeamMember(discord_id, time_zone, name, github_username)\n self.db.insert_new_member(discord_id, name, time_zone, github_username)\n self.team_members.append(new_member)\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Remove a team member from the list and the database.\n\n :param 
discord_id: The Discord ID of the member to remove.\n \"\"\"\n self.db.remove_member(discord_id)\n self.team_members = [member for member in self.team_members if member.discord_id != discord_id]\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Update the timezone of a team member in the database and the list.\n\n :param discord_id: The Discord ID of the member to update.\n :param new_time_zone: The new timezone string to set for the member.\n \"\"\"\n # Update the timezone in the database\n self.db.update_member_timezone(discord_id, new_time_zone)\n\n # Find the member in the team_members list and update their timezone\n member = self.find_member(discord_id)\n if member:\n member.time_zone = new_time_zone\n\n def set_member_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n # Update the vacation status in the database\n self.db.set_vacation_status(discord_id, on_vacation)\n\n # Find the member in the team_members list and update their vacation status\n member = self.find_member(discord_id)\n if member:\n member.on_vacation = on_vacation" }, { "identifier": "UpdatesManager", "path": "updates/updates_manager.py", "snippet": "class UpdatesManager:\n \"\"\"\n Manages status updates for team members.\n \"\"\"\n\n def __init__(self, updates_db: UpdatesDB):\n \"\"\"\n Initializes a new UpdatesManager instance.\n\n Args:\n updates_db: The UpdatesDB object that handles database operations.\n \"\"\"\n self.updates_db = updates_db\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update.\n\n Args:\n discord_id: The Discord ID of the team member.\n status: The status update.\n \"\"\"\n self.updates_db.insert_status(discord_id, status, time_zone)\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized status for the most recent update for a given user.\n\n Args:\n discord_id: The Discord ID of the team member.\n summarized_status: The summarized status update.\n \"\"\"\n self.updates_db.update_summarized_status(discord_id, summarized_status)\n\n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n Args:\n discord_id: The Discord ID of the user.\n time_zone: The time zone of the user.\n\n Returns:\n The count of check-ins in the current week.\n \"\"\"\n return self.updates_db.get_weekly_checkins_count(discord_id, time_zone)\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n return self.updates_db.get_all_statuses_for_user(discord_id)\n\n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n return self.updates_db.get_last_update_timestamp(discord_id)\n\n def delete_newest_status(self, 
discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.updates_db.delete_newest_status(discord_id)\n\n async def generate_daily_summary(self, user_message: str) -> str:\n \"\"\"\n Generates a daily summary of the user's message using a large language model.\n\n Args:\n user_message: The user's message that needs to be summarized.\n\n Returns:\n The summarized message.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Please summarize the user's update into two sections: 'Did' for tasks completed yesterday and 'Do' for tasks planned for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n \n except Exception as e:\n print(f\"An error occurred while generating the summary: {e}\")\n return \"Error in generating summary\"\n\n async def generate_weekly_summary(self, discord_id: int, start_date: datetime, end_date: datetime) -> str:\n \"\"\"\n Generates a weekly summary of the user's status updates using a large language model.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n The summarized weekly status update.\n \"\"\"\n # Fetch all raw status updates for the specified date range using the new method in UpdatesDB\n weekly_statuses = self.updates_db.get_statuses_in_date_range(discord_id, start_date, end_date)\n\n if not weekly_statuses:\n return \"There are no status updates for this week.\"\n \n # Combine all raw statuses into a single string\n combined_statuses = \"\\n\".join(weekly_statuses)\n \n # Prepare a system message to guide OpenAI's model for weekly summary\n system_message = \"Please generate a comprehensive weekly summary based on the provided daily status updates, including only tasks that have been accomplished. 
Ignore tasks that are not in the 'Did' section.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_statuses}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-4-0613\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n weekly_summary = response['choices'][0]['message']['content'].strip()\n\n return weekly_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the weekly summary: {e}\")\n return \"Error in generating weekly summary\"\n \n async def summarize_technical_updates(self, commit_messages: List[str]) -> str:\n \"\"\"\n Summarizes the technical updates based on commit messages.\n\n Args:\n commit_messages: List of commit messages for the day.\n\n Returns:\n A summarized version of the technical updates.\n \"\"\"\n\n # Combine commit messages into a single string for the LLM\n combined_commits = \"\\n\".join(commit_messages)\n\n # If there are no commit messages, return a default message\n if not combined_commits:\n return \"No technical updates found based on commit messages.\"\n\n # Summarization using LLM\n system_message = \"Please provide a concise summary of the technical updates based on the provided commit messages.\"\n\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_commits}\n ]\n\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the technical summary: {e}\")\n return \"Error in generating technical summary.\"\n\n async def summarize_feedback_and_revisions(self, original_report: str, feedback: str) -> str:\n \"\"\"\n Takes the original report and user feedback and generates a revised summary.\n\n Args:\n original_report: The original summarized report.\n feedback: The user's feedback or suggested edits.\n\n Returns:\n The revised summary.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Revise the original report based on the user's feedback.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": f\"Original Report: {original_report}\"},\n {\"role\": \"user\", \"content\": f\"Feedback: {feedback}\"}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n revised_summary = response['choices'][0]['message']['content'].strip()\n\n return revised_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the revised summary: {e}\")\n return \"Error in generating revised summary\"\n\n async def summarize_non_technical_updates(self, update: str) -> str:\n \"\"\"\n Summarizes a non-technical update using a large language model.\n\n Args:\n update: The raw non-technical update provided by the user.\n\n Returns:\n The summarized non-technical 
update.\n \"\"\"\n\n # System message to guide the LLM for a concise summary\n system_message = \"Please provide a concise summary of the non-technical update shared by the user.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": update}\n ]\n\n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the non-technical summary: {e}\")\n return \"Error in generating summary\"\n\n async def summarize_goals_for_the_day(self, goals: str) -> str:\n \"\"\"\n Summarizes the user's goals for the day using a large language model.\n\n Args:\n goals: The user's raw input on their goals for the day.\n\n Returns:\n The summarized goals for the day.\n \"\"\"\n # Initiate the conversation with the model\n system_message = \"Please provide a concise summary of the user's goals for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": goals}\n ]\n \n # Specify the model engine you want to use (this is an example and can be adjusted based on your needs)\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Provide user's input and retrieve model's response\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_goals = response['choices'][0]['message']['content'].strip()\n\n # Return the summary\n return summarized_goals\n \n except Exception as e:\n print(f\"An error occurred while generating the goals summary: {e}\")\n return \"Error in generating goals summary\"\n \n async def evaluate_performance(self, user_message: str) -> str:\n \"\"\"\n Evaluates the performance of the user based on their update.\n\n Args:\n user_message: The user's message that needs to be evaluated.\n\n Returns:\n The evaluation of the user's performance.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"\"\"\n You are a project manager at a fast-paced tech startup, recognized for providing clear and actionable feedback during stand-up meetings. 
Your role is to evaluate the quality of team members' daily stand-up reports, with a focus on clear communication, comprehensive planning, and problem-solving abilities.\n It is essential to note that team members should neither be penalized nor rewarded for merely mentioning issues; instead, the emphasis should be on the clarity of the report and the quality of strategies proposed to address these issues.\n Your feedback is candid and aimed at encouraging high-quality reporting and effective planning within the startup environment.\n Please provide a two-sentence summary of the stand-up and assign a grade (A, B, C, D, or F) based on the following criteria:\n\n - A: Excellent - The report is exceptionally clear and detailed, with well-defined tasks and a thorough approach to tackling issues, exemplifying the proactive and problem-solving ethos of our startup.\n - B: Good - The report is clear and adequately detailed, outlining tasks and addressing issues with a reasonable approach, indicating a commitment to momentum and resolution.\n - C: Fair - The report is understandable but lacks detail in some areas, with a basic approach to resolving issues, suggesting a need for further strategy development.\n - D: Poor - The report is vague or missing details, with a limited or unclear approach to issues, necessitating better communication and planning skills.\n - F: Fail - The report is missing, overly vague, or lacks a coherent structure, with no apparent approach to issues, reflecting a need for significant improvement in reporting and strategizing.\n\n A comprehensive stand-up report effectively communicates what was done and what is planned, clearly identifies any issues, and connects daily tasks with broader business objectives.\n\n Provide clear and constructive feedback, aiming to foster a culture of excellence and continuous improvement in how we plan and communicate our daily activities.\n \"\"\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n performance_evaluation = response['choices'][0]['message']['content'].strip()\n\n return performance_evaluation\n \n except Exception as e:\n print(f\"An error occurred while evaluating the performance: {e}\")\n return \"Error in evaluating performance\"" }, { "identifier": "WeeklyPostManager", "path": "weekly_posts/weekly_post_manager.py", "snippet": "class WeeklyPostManager:\n \"\"\"Manages the status post in a Discord channel.\"\"\"\n \n def __init__(self, channel, weekly_posts_db: WeeklyPostsDB):\n \"\"\"\n Initializes a new WeeklyPostManager instance.\n \"\"\"\n self.channel = channel\n self.weekly_posts_db = weekly_posts_db\n self.editable_weekly_post = None\n self.load_weekly_post_data()\n\n def load_weekly_post_data(self):\n \"\"\"\n Load the weekly post data from the database.\n \n This method queries the 'weekly_posts' table to get the ID and timestamp of \n the last weekly post. 
If no data exists, it sets the ID and timestamp to None.\n \"\"\"\n data = self.weekly_posts_db.get_weekly_post_data()\n self.editable_weekly_post_id = data.get('post_id', None)\n self.weekly_post_timestamp = data.get('timestamp', None)\n\n def save_weekly_post_data(self):\n \"\"\"\n Save the weekly post data to the database.\n \n This method inserts or updates the ID and timestamp of the current weekly post \n in the 'weekly_posts' table.\n \"\"\"\n self.weekly_posts_db.save_weekly_post_data(self.editable_weekly_post.id, datetime.now())\n\n async def initialize_post(self, team_members: List[TeamMember]):\n \"\"\"\n Initializes or retrieves the weekly status post on Discord.\n\n This function checks if a valid weekly post already exists for the current week.\n If it does, it retrieves that post. Otherwise, it sends a new message in the Discord\n channel with the list of team members and their statuses.\n\n Args:\n team_members: A list of TeamMember objects to be displayed in the post.\n \"\"\"\n current_week_number = datetime.now().isocalendar()[1]\n saved_week_number = self.weekly_post_timestamp.isocalendar()[1] if self.weekly_post_timestamp else None\n\n # Skip initialization if the post already exists and is for the current week\n if self.editable_weekly_post_id and current_week_number == saved_week_number:\n self.editable_weekly_post = await self.channel.fetch_message(self.editable_weekly_post_id)\n return\n\n utc_now = pytz.utc.localize(datetime.utcnow())\n today_weekday = utc_now.weekday()\n last_monday = utc_now - timedelta(days=today_weekday)\n next_sunday = last_monday + timedelta(days=6)\n\n start_date = self.format_date(last_monday)\n end_date = self.format_date(next_sunday)\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {m.current_streak}🔥\" if m.current_streak > 0 else \"\"\n\n # Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {'❓' * 5} {streak_str}`\"\n member_list.append(new_line)\n\n member_list_str = '\\n'.join(member_list)\n\n await self.channel.send(f\"# Weekly Status Updates\")\n await self.channel.send(f\"## {start_date} to {end_date}\")\n if member_list_str:\n self.editable_weekly_post = await self.channel.send(f\"{member_list_str}\")\n self.save_weekly_post_data() # Save the ID and timestamp after creating the post\n\n async def rebuild_post(self, team_members: List[TeamMember]):\n \"\"\"\n Rebuilds the entire weekly status post from the team members' data.\n\n Args:\n team_members: A list of TeamMember objects with updated statuses and streaks.\n \"\"\"\n # If there are no team members, delete the post and return\n if not team_members:\n if self.editable_weekly_post:\n await self.editable_weekly_post.delete()\n self.editable_weekly_post = None\n return\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Get the streak and number of weekly check-ins for the member\n streak = m.current_streak\n check_ins = m.weekly_checkins\n\n # Generate the marks based on the number of check-ins\n marks = \"✅\" * check_ins + \"❓\" * (5 - check_ins)\n\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {streak}🔥\" if streak > 0 else \"\"\n\n # 
Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {marks} {streak_str}`\"\n member_list.append(new_line)\n\n new_content = '\\n'.join(member_list)\n\n # Update the existing post or create a new one if it doesn't exist\n if self.editable_weekly_post:\n self.editable_weekly_post = await self.editable_weekly_post.edit(content=new_content)\n else:\n self.editable_weekly_post = await self.channel.send(new_content)\n\n # Save the ID and timestamp of the post\n self.save_weekly_post_data()\n\n def format_date(self, dt: datetime) -> str:\n \"\"\"\n Formats a datetime object into a human-readable string.\n\n Args:\n dt: The datetime object to format.\n\n Returns:\n A human-readable date string.\n \"\"\"\n suffix = ['th', 'st', 'nd', 'rd']\n day = int(dt.strftime('%d'))\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix_index = 0 # use 'th'\n else:\n suffix_index = day % 10 # use 'st', 'nd', 'rd' as appropriate\n\n return dt.strftime(f\"%B {day}{suffix[suffix_index]}\")" }, { "identifier": "Scheduler", "path": "scheduler.py", "snippet": "class Scheduler:\n \"\"\"Scheduler class to manage timed jobs for sending status requests.\n\n Attributes:\n scheduler: The APScheduler object.\n job_ids: A dictionary to store lists of job IDs for each member.\n \"\"\"\n \n def __init__(self) -> None:\n \"\"\"Initialize the Scheduler object and start the APScheduler.\"\"\"\n self.scheduler: AsyncIOScheduler = AsyncIOScheduler()\n self.job_ids: Dict[int, List[str]] = {} # Store job IDs indexed by member's Discord ID\n self.weekly_post_job_id = None # To store the ID of the scheduled weekly post job\n self.scheduler.start()\n\n def add_job(self, func: callable, member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager) -> None:\n \"\"\"Add a new job to the scheduler for a specific team member.\n \n Args:\n func: The function to call when the job is run.\n member: The TeamMember object for whom the job is added.\n \"\"\"\n time_zone = pytz.timezone(member.time_zone)\n \n weekday_trigger = CronTrigger(day_of_week='mon,tue,wed,thu,fri', hour=10, timezone=time_zone)\n weekend_trigger = CronTrigger(day_of_week='sat,sun', hour=11, timezone=time_zone)\n\n weekday_job = self.scheduler.add_job(func, weekday_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n weekend_job = self.scheduler.add_job(func, weekend_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n\n self.job_ids.setdefault(member.discord_id, []).extend([weekday_job.id, weekend_job.id])\n\n def remove_job(self, discord_id: int) -> None:\n \"\"\"Remove jobs for a specific team member.\n \n Args:\n discord_id: The Discord ID of the member for whom the job should be removed.\n \"\"\"\n job_ids = self.job_ids.get(discord_id, [])\n for job_id in job_ids:\n self.scheduler.remove_job(job_id)\n\n if discord_id in self.job_ids:\n del self.job_ids[discord_id] # Remove the job IDs from the dictionary\n\n def schedule_weekly_post(self, func: callable, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]) -> None:\n \"\"\"Schedules the weekly post based on the latest time zone among the team members.\"\"\"\n \n # Determine the latest time zone\n latest_time_zone = max([member.time_zone for member in team_members], key=lambda tz: pytz.timezone(tz).utcoffset(datetime.utcnow()))\n\n # Set the trigger for 9:10 AM in the earliest time zone on 
Monday\n trigger = CronTrigger(day_of_week='mon', hour=9, minute=10, timezone=latest_time_zone)\n\n # Schedule the function with the trigger\n job = self.scheduler.add_job(func, trigger, args=[weekly_post_manager, streaks_manager, team_members])\n self.weekly_post_job_id = job.id\n\n def unschedule_weekly_post(self) -> None:\n \"\"\"Removes the weekly post job from the scheduler.\"\"\"\n if self.weekly_post_job_id:\n self.scheduler.remove_job(self.weekly_post_job_id)\n self.weekly_post_job_id = None\n\n def get_all_scheduled_jobs(self, team_member_manager) -> List[str]:\n \"\"\"Retrieve all scheduled jobs as a list of strings.\"\"\"\n job_descriptions = []\n\n for job in self.scheduler.get_jobs():\n # Determine the associated team member by looking up the job ID in the job_ids dictionary\n member_discord_id = next((discord_id for discord_id, job_ids in self.job_ids.items() if job.id in job_ids), None)\n member_name = team_member_manager.find_member(member_discord_id).name if member_discord_id else \"Unknown\"\n\n # Calculate the remaining time until the next run\n now = datetime.now(job.next_run_time.tzinfo) # Get the current time with the same timezone as the job's next_run_time\n remaining_time = job.next_run_time - now\n remaining_time_str = str(remaining_time).split('.')[0] # Remove the microseconds part\n\n # If this job is the weekly post job\n if job.id == self.weekly_post_job_id:\n job_descriptions.append(f\"ID: {job.id}, Type: Weekly Post, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n else:\n job_descriptions.append(f\"ID: {job.id}, Member: {member_name}, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n\n return job_descriptions" }, { "identifier": "TeamMember", "path": "team_members/team_member.py", "snippet": "class TeamMember:\n \"\"\"TeamMember class to store individual team member details.\n \n Attributes:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone in which the team member resides.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins of the team member.\n weekly_checkins: The number of check-ins for the current week.\n \"\"\"\n \n def __init__(self, discord_id: int, time_zone: str, name: str, github_username: str,\n current_streak: int = 0, weekly_checkins: int = 0, on_vacation: bool = False) -> None:\n \"\"\"Initialize a new TeamMember object.\n \n Args:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone of the team member.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins. Defaults to 0.\n weekly_checkins: The number of check-ins for the current week. 
Defaults to 0.\n \"\"\"\n self.discord_id: int = discord_id\n self.time_zone: str = time_zone\n self.name: str = name\n self.github_username: str = github_username\n self.current_streak: int = current_streak\n self.weekly_checkins: int = weekly_checkins\n self.on_vacation: bool = on_vacation\n \n def update_streak(self, streak: int) -> None:\n \"\"\"Update the current streak of the team member.\n \n Args:\n streak: The new streak count.\n \"\"\"\n self.current_streak = streak\n \n def reset_streak(self) -> None:\n \"\"\"Reset the current streak of the team member to 0.\"\"\"\n self.current_streak = 0\n\n def update_weekly_checkins(self, count: int):\n \"\"\"\n Update the weekly check-ins count.\n\n Args:\n count: The new count of weekly check-ins.\n \"\"\"\n self.weekly_checkins = count\n \n def increment_weekly_checkins(self) -> None:\n \"\"\"Increment the number of check-ins for the current week by 1.\"\"\"\n self.weekly_checkins += 1\n \n def reset_weekly_checkins(self) -> None:\n \"\"\"Reset the number of check-ins for the current week to 0.\"\"\"\n self.weekly_checkins = 0" } ]
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
12,661
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]):
7
2023-10-12 02:01:46+00:00
16k
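The values above complete one example: a source-file prefix that stops at the emoji constants, followed by the single line expected to come next (the weekly_state_reset signature). Below is a minimal sketch of how such a prefix/next-line pairing might be checked, assuming exact string match is the intended criterion; the function name, variable names, and the candidate completion are illustrative assumptions and are not part of the record itself.

# Illustrative sketch only: the exact-match criterion and all names below are assumptions.
def is_correct_completion(candidate: str, expected: str) -> bool:
    """Compare a proposed next line against the gold next line, ignoring surrounding whitespace."""
    return candidate.strip() == expected.strip()

# Gold next line as shown in the record above.
expected_next_line = (
    "async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, "
    "streaks_manager: StreaksManager, team_members: List[TeamMember]):"
)

# A hypothetical model completion for the prefix ending at REPORT_SUBMISSION_EMOJI = '📝'.
candidate = (
    "async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, "
    "streaks_manager: StreaksManager, team_members: List[TeamMember]):"
)

print(is_correct_completion(candidate, expected_next_line))  # True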
azuline/rose
rose/tracks_test.py
[ { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: str | None\n genre: list[str]\n label: list[str]\n releasetype: str\n\n albumartists: ArtistMapping\n trackartists: ArtistMapping\n\n duration_sec: int\n\n path: Path\n\n @classmethod\n def from_file(cls, p: Path) -> AudioTags:\n \"\"\"Read the tags of an audio file on disk.\"\"\"\n if not any(p.suffix.lower() == ext for ext in SUPPORTED_AUDIO_EXTENSIONS):\n raise UnsupportedFiletypeError(f\"{p.suffix} not a supported filetype\")\n try:\n m = mutagen.File(p) # type: ignore\n except mutagen.MutagenError as e: # type: ignore\n raise UnsupportedFiletypeError(f\"Failed to open file: {e}\") from e\n if isinstance(m, mutagen.mp3.MP3):\n # ID3 returns trackno/discno tags as no/total. We have to parse.\n tracknumber = discnumber = tracktotal = disctotal = None\n if tracknos := _get_tag(m.tags, [\"TRCK\"]):\n try:\n tracknumber, tracktotalstr = tracknos.split(\"/\", 1)\n tracktotal = _parse_int(tracktotalstr)\n except ValueError:\n tracknumber = tracknos\n if discnos := _get_tag(m.tags, [\"TPOS\"]):\n try:\n discnumber, disctotalstr = discnos.split(\"/\", 1)\n disctotal = _parse_int(disctotalstr)\n except ValueError:\n discnumber = discnos\n\n def _get_paired_frame(x: str) -> str | None:\n if not m.tags:\n return None\n for tag in [\"TIPL\", \"IPLS\"]:\n try:\n frame = m.tags[tag]\n except KeyError:\n continue\n return r\" \\\\ \".join([p[1] for p in frame.people if p[0].lower() == x.lower()])\n return None\n\n return AudioTags(\n id=_get_tag(m.tags, [\"TXXX:ROSEID\"]),\n release_id=_get_tag(m.tags, [\"TXXX:ROSERELEASEID\"]),\n title=_get_tag(m.tags, [\"TIT2\"]),\n year=_parse_year(_get_tag(m.tags, [\"TDRC\", \"TYER\"])),\n tracknumber=tracknumber,\n tracktotal=tracktotal,\n discnumber=discnumber,\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"TALB\"]),\n genre=_split_tag(_get_tag(m.tags, [\"TCON\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"TPUB\"], split=True)),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"TXXX:RELEASETYPE\"], first=True)),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"TPE2\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"TPE1\"], split=True),\n remixer=_get_tag(m.tags, [\"TPE4\"], split=True),\n composer=_get_tag(m.tags, [\"TCOM\"], split=True),\n conductor=_get_tag(m.tags, [\"TPE3\"], split=True),\n producer=_get_paired_frame(\"producer\"),\n dj=_get_paired_frame(\"DJ-mix\"),\n ),\n duration_sec=round(m.info.length),\n path=p,\n )\n if isinstance(m, mutagen.mp4.MP4):\n tracknumber = discnumber = tracktotal = disctotal = None\n with contextlib.suppress(ValueError):\n tracknumber, tracktotalstr = _get_tuple_tag(m.tags, [\"trkn\"]) # type: ignore\n tracktotal = _parse_int(tracktotalstr)\n with contextlib.suppress(ValueError):\n discnumber, disctotalstr = _get_tuple_tag(m.tags, [\"disk\"]) # type: ignore\n disctotal = _parse_int(disctotalstr)\n\n return AudioTags(\n id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:ID\"]),\n release_id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:RELEASEID\"]),\n title=_get_tag(m.tags, [\"\\xa9nam\"]),\n year=_parse_year(_get_tag(m.tags, [\"\\xa9day\"])),\n tracknumber=str(tracknumber),\n tracktotal=tracktotal,\n discnumber=str(discnumber),\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"\\xa9alb\"]),\n 
genre=_split_tag(_get_tag(m.tags, [\"\\xa9gen\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"----:com.apple.iTunes:LABEL\"], split=True)),\n releasetype=_normalize_rtype(\n _get_tag(m.tags, [\"----:com.apple.iTunes:RELEASETYPE\"], first=True)\n ),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"aART\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"\\xa9ART\"], split=True),\n remixer=_get_tag(m.tags, [\"----:com.apple.iTunes:REMIXER\"], split=True),\n producer=_get_tag(m.tags, [\"----:com.apple.iTunes:PRODUCER\"], split=True),\n composer=_get_tag(m.tags, [\"\\xa9wrt\"], split=True),\n conductor=_get_tag(m.tags, [\"----:com.apple.iTunes:CONDUCTOR\"], split=True),\n dj=_get_tag(m.tags, [\"----:com.apple.iTunes:DJMIXER\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n return AudioTags(\n id=_get_tag(m.tags, [\"roseid\"]),\n release_id=_get_tag(m.tags, [\"rosereleaseid\"]),\n title=_get_tag(m.tags, [\"title\"]),\n year=_parse_year(_get_tag(m.tags, [\"date\", \"year\"])),\n tracknumber=_get_tag(m.tags, [\"tracknumber\"], first=True),\n tracktotal=_parse_int(_get_tag(m.tags, [\"tracktotal\"], first=True)),\n discnumber=_get_tag(m.tags, [\"discnumber\"], first=True),\n disctotal=_parse_int(_get_tag(m.tags, [\"disctotal\"], first=True)),\n album=_get_tag(m.tags, [\"album\"]),\n genre=_split_tag(_get_tag(m.tags, [\"genre\"], split=True)),\n label=_split_tag(\n _get_tag(m.tags, [\"organization\", \"label\", \"recordlabel\"], split=True)\n ),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"releasetype\"], first=True)),\n albumartists=parse_artist_string(\n main=_get_tag(m.tags, [\"albumartist\"], split=True)\n ),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"artist\"], split=True),\n remixer=_get_tag(m.tags, [\"remixer\"], split=True),\n producer=_get_tag(m.tags, [\"producer\"], split=True),\n composer=_get_tag(m.tags, [\"composer\"], split=True),\n conductor=_get_tag(m.tags, [\"conductor\"], split=True),\n dj=_get_tag(m.tags, [\"djmixer\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n raise UnsupportedFiletypeError(f\"{p} is not a supported audio file\")\n\n @no_type_check\n def flush(self, *, validate: bool = True) -> None:\n \"\"\"Flush the current tags to the file on disk.\"\"\"\n m = mutagen.File(self.path)\n if not validate and \"pytest\" not in sys.modules:\n raise Exception(\"Validate can only be turned off by tests.\")\n\n self.releasetype = (self.releasetype or \"unknown\").lower()\n if validate and self.releasetype not in SUPPORTED_RELEASE_TYPES:\n raise UnsupportedTagValueTypeError(\n f\"Release type {self.releasetype} is not a supported release type.\\n\"\n f\"Supported release types: {', '.join(SUPPORTED_RELEASE_TYPES)}\"\n )\n\n if isinstance(m, mutagen.mp3.MP3):\n if m.tags is None:\n m.tags = mutagen.id3.ID3()\n\n def _write_standard_tag(key: str, value: str | None) -> None:\n m.tags.delall(key)\n frame = getattr(mutagen.id3, key)(text=value)\n if value:\n m.tags.add(frame)\n\n def _write_tag_with_description(name: str, value: str | None) -> None:\n key, desc = name.split(\":\", 1)\n # Since the ID3 tags work with the shared prefix key before `:`, manually preserve\n # the other tags with the shared prefix key.\n keep_fields = [f for f in m.tags.getall(key) if getattr(f, \"desc\", None) != desc]\n m.tags.delall(key)\n if value:\n frame = 
getattr(mutagen.id3, key)(desc=desc, text=value)\n m.tags.add(frame)\n for f in keep_fields:\n m.tags.add(f)\n\n _write_tag_with_description(\"TXXX:ROSEID\", self.id)\n _write_tag_with_description(\"TXXX:ROSERELEASEID\", self.release_id)\n _write_standard_tag(\"TIT2\", self.title)\n _write_standard_tag(\"TDRC\", str(self.year).zfill(4))\n _write_standard_tag(\"TRCK\", self.tracknumber)\n _write_standard_tag(\"TPOS\", self.discnumber)\n _write_standard_tag(\"TALB\", self.album)\n _write_standard_tag(\"TCON\", \";\".join(self.genre))\n _write_standard_tag(\"TPUB\", \";\".join(self.label))\n _write_tag_with_description(\"TXXX:RELEASETYPE\", self.releasetype)\n _write_standard_tag(\"TPE2\", format_artist_string(self.albumartists))\n _write_standard_tag(\"TPE1\", format_artist_string(self.trackartists))\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n m.tags.delall(\"TPE4\")\n m.tags.delall(\"TCOM\")\n m.tags.delall(\"TPE3\")\n # Delete all paired text frames, since these represent additional artist roles. We don't\n # want to preserve them.\n m.tags.delall(\"TIPL\")\n m.tags.delall(\"IPLS\")\n m.save()\n return\n if isinstance(m, mutagen.mp4.MP4):\n if m.tags is None:\n m.tags = mutagen.mp4.MP4Tags()\n m.tags[\"----:net.sunsetglow.rose:ID\"] = (self.id or \"\").encode()\n m.tags[\"----:net.sunsetglow.rose:RELEASEID\"] = (self.release_id or \"\").encode()\n m.tags[\"\\xa9nam\"] = self.title or \"\"\n m.tags[\"\\xa9day\"] = str(self.year).zfill(4)\n m.tags[\"\\xa9alb\"] = self.album or \"\"\n m.tags[\"\\xa9gen\"] = \";\".join(self.genre)\n m.tags[\"----:com.apple.iTunes:LABEL\"] = \";\".join(self.label).encode()\n m.tags[\"----:com.apple.iTunes:RELEASETYPE\"] = self.releasetype.encode()\n m.tags[\"aART\"] = format_artist_string(self.albumartists)\n m.tags[\"\\xa9ART\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:REMIXER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:PRODUCER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"\\xa9wrt\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:CONDUCTOR\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:DJMIXER\"]\n\n # The track and disc numbers in MP4 are a bit annoying, because they must be a\n # single-element list of 2-tuple ints. We preserve the previous tracktotal/disctotal (as\n # Rose does not care about those values), and then attempt to write our own tracknumber\n # and discnumber.\n try:\n prev_tracktotal = m.tags[\"trkn\"][0][1]\n except (KeyError, IndexError):\n prev_tracktotal = 1\n try:\n prev_disctotal = m.tags[\"disk\"][0][1]\n except (KeyError, IndexError):\n prev_disctotal = 1\n try:\n m.tags[\"trkn\"] = [(int(self.tracknumber or \"0\"), prev_tracktotal)]\n m.tags[\"disk\"] = [(int(self.discnumber or \"0\"), prev_disctotal)]\n except ValueError as e:\n raise UnsupportedTagValueTypeError(\n \"Could not write m4a trackno/discno tags: must be integers. 
\"\n f\"Got: {self.tracknumber=} / {self.discnumber=}\"\n ) from e\n\n m.save()\n return\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n if m.tags is None:\n if isinstance(m, mutagen.flac.FLAC):\n m.tags = mutagen.flac.VCFLACDict()\n elif isinstance(m, mutagen.oggvorbis.OggVorbis):\n m.tags = mutagen.oggvorbis.OggVCommentDict()\n else:\n m.tags = mutagen.oggopus.OggOpusVComment()\n assert not isinstance(m.tags, mutagen.flac.MetadataBlock)\n m.tags[\"roseid\"] = self.id or \"\"\n m.tags[\"rosereleaseid\"] = self.release_id or \"\"\n m.tags[\"title\"] = self.title or \"\"\n m.tags[\"date\"] = str(self.year).zfill(4)\n m.tags[\"tracknumber\"] = self.tracknumber or \"\"\n m.tags[\"discnumber\"] = self.discnumber or \"\"\n m.tags[\"album\"] = self.album or \"\"\n m.tags[\"genre\"] = \";\".join(self.genre)\n m.tags[\"organization\"] = \";\".join(self.label)\n m.tags[\"releasetype\"] = self.releasetype\n m.tags[\"albumartist\"] = format_artist_string(self.albumartists)\n m.tags[\"artist\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"remixer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"producer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"composer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"conductor\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"djmixer\"]\n m.save()\n return\n\n raise RoseError(f\"Impossible: unknown mutagen type: {type(m)=} ({repr(m)=})\")" }, { "identifier": "Config", "path": "rose/config.py", "snippet": "class Config:\n music_source_dir: Path\n fuse_mount_dir: Path\n cache_dir: Path\n # Maximum parallel processes for cache updates. Defaults to nproc/2.\n max_proc: int\n ignore_release_directories: list[str]\n\n # A map from parent artist -> subartists.\n artist_aliases_map: dict[str, list[str]]\n # A map from subartist -> parent artists.\n artist_aliases_parents_map: dict[str, list[str]]\n\n fuse_artists_whitelist: list[str] | None\n fuse_genres_whitelist: list[str] | None\n fuse_labels_whitelist: list[str] | None\n fuse_artists_blacklist: list[str] | None\n fuse_genres_blacklist: list[str] | None\n fuse_labels_blacklist: list[str] | None\n\n cover_art_stems: list[str]\n valid_art_exts: list[str]\n\n rename_source_files: bool\n path_templates: PathTemplateConfig\n\n stored_metadata_rules: list[MetadataRule]\n\n @classmethod\n def parse(cls, config_path_override: Path | None = None) -> Config:\n # As we parse, delete consumed values from the data dictionary. 
If any are left over at the\n # end of the config, warn that unknown config keys were found.\n cfgpath = config_path_override or CONFIG_PATH\n cfgtext = \"\"\n try:\n with cfgpath.open(\"r\") as fp:\n cfgtext = fp.read()\n data = tomllib.loads(cfgtext)\n except FileNotFoundError as e:\n raise ConfigNotFoundError(f\"Configuration file not found ({cfgpath})\") from e\n except tomllib.TOMLDecodeError as e:\n raise ConfigDecodeError(\n f\"Failed to decode configuration file: invalid TOML: {e}\"\n ) from e\n\n try:\n music_source_dir = Path(data[\"music_source_dir\"]).expanduser()\n del data[\"music_source_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key music_source_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for music_source_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n fuse_mount_dir = Path(data[\"fuse_mount_dir\"]).expanduser()\n del data[\"fuse_mount_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key fuse_mount_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_mount_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n cache_dir = Path(data[\"cache_dir\"]).expanduser()\n del data[\"cache_dir\"]\n except KeyError:\n cache_dir = XDG_CACHE_ROSE\n except (TypeError, ValueError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cache_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n cache_dir.mkdir(parents=True, exist_ok=True)\n\n try:\n max_proc = int(data[\"max_proc\"])\n del data[\"max_proc\"]\n if max_proc <= 0:\n raise ValueError(f\"must be a positive integer: got {max_proc}\")\n except KeyError:\n max_proc = max(1, multiprocessing.cpu_count() // 2)\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for max_proc in configuration file ({cfgpath}): must be a positive integer\"\n ) from e\n\n artist_aliases_map: dict[str, list[str]] = defaultdict(list)\n artist_aliases_parents_map: dict[str, list[str]] = defaultdict(list)\n try:\n for entry in data.get(\"artist_aliases\", []):\n if not isinstance(entry[\"artist\"], str):\n raise ValueError(f\"Artists must be of type str: got {type(entry['artist'])}\")\n artist_aliases_map[entry[\"artist\"]] = entry[\"aliases\"]\n if not isinstance(entry[\"aliases\"], list):\n raise ValueError(\n f\"Aliases must be of type list[str]: got {type(entry['aliases'])}\"\n )\n for s in entry[\"aliases\"]:\n if not isinstance(s, str):\n raise ValueError(f\"Each alias must be of type str: got {type(s)}\")\n artist_aliases_parents_map[s].append(entry[\"artist\"])\n with contextlib.suppress(KeyError):\n del data[\"artist_aliases\"]\n except (ValueError, TypeError, KeyError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for artist_aliases in configuration file ({cfgpath}): must be a list of {{ artist = str, aliases = list[str] }} records\"\n ) from e\n\n try:\n fuse_artists_whitelist = data[\"fuse_artists_whitelist\"]\n del data[\"fuse_artists_whitelist\"]\n if not isinstance(fuse_artists_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_whitelist)}\")\n for s in fuse_artists_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_whitelist = None\n except ValueError as e:\n 
raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_whitelist = data[\"fuse_genres_whitelist\"]\n del data[\"fuse_genres_whitelist\"]\n if not isinstance(fuse_genres_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_whitelist)}\")\n for s in fuse_genres_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_whitelist = data[\"fuse_labels_whitelist\"]\n del data[\"fuse_labels_whitelist\"]\n if not isinstance(fuse_labels_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_whitelist)}\")\n for s in fuse_labels_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_artists_blacklist = data[\"fuse_artists_blacklist\"]\n del data[\"fuse_artists_blacklist\"]\n if not isinstance(fuse_artists_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_blacklist)}\")\n for s in fuse_artists_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_blacklist = data[\"fuse_genres_blacklist\"]\n del data[\"fuse_genres_blacklist\"]\n if not isinstance(fuse_genres_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_blacklist)}\")\n for s in fuse_genres_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_blacklist = data[\"fuse_labels_blacklist\"]\n del data[\"fuse_labels_blacklist\"]\n if not isinstance(fuse_labels_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_blacklist)}\")\n for s in fuse_labels_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n if fuse_artists_whitelist and fuse_artists_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_artists_whitelist and fuse_artists_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_genres_whitelist and fuse_genres_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_genres_whitelist and fuse_genres_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_labels_whitelist and fuse_labels_blacklist:\n raise 
InvalidConfigValueError(\n f\"Cannot specify both fuse_labels_whitelist and fuse_labels_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n\n try:\n cover_art_stems = data[\"cover_art_stems\"]\n del data[\"cover_art_stems\"]\n if not isinstance(cover_art_stems, list):\n raise ValueError(f\"Must be a list[str]: got {type(cover_art_stems)}\")\n for s in cover_art_stems:\n if not isinstance(s, str):\n raise ValueError(f\"Each cover art stem must be of type str: got {type(s)}\")\n except KeyError:\n cover_art_stems = [\"folder\", \"cover\", \"art\", \"front\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cover_art_stems in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n valid_art_exts = data[\"valid_art_exts\"]\n del data[\"valid_art_exts\"]\n if not isinstance(valid_art_exts, list):\n raise ValueError(f\"Must be a list[str]: got {type(valid_art_exts)}\")\n for s in valid_art_exts:\n if not isinstance(s, str):\n raise ValueError(f\"Each art extension must be of type str: got {type(s)}\")\n except KeyError:\n valid_art_exts = [\"jpg\", \"jpeg\", \"png\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for valid_art_exts in configuration file ({cfgpath}): {e}\"\n ) from e\n\n cover_art_stems = [x.lower() for x in cover_art_stems]\n valid_art_exts = [x.lower() for x in valid_art_exts]\n\n try:\n rename_source_files = data[\"rename_source_files\"]\n del data[\"rename_source_files\"]\n if not isinstance(rename_source_files, bool):\n raise ValueError(f\"Must be a bool: got {type(rename_source_files)}\")\n except KeyError:\n rename_source_files = False\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for rename_source_files in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n ignore_release_directories = data[\"ignore_release_directories\"]\n del data[\"ignore_release_directories\"]\n if not isinstance(ignore_release_directories, list):\n raise ValueError(f\"Must be a list[str]: got {type(ignore_release_directories)}\")\n for s in ignore_release_directories:\n if not isinstance(s, str):\n raise ValueError(f\"Each release directory must be of type str: got {type(s)}\")\n except KeyError:\n ignore_release_directories = []\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for ignore_release_directories in configuration file ({cfgpath}): {e}\"\n ) from e\n\n stored_metadata_rules: list[MetadataRule] = []\n for d in data.get(\"stored_metadata_rules\", []):\n if not isinstance(d, dict):\n raise InvalidConfigValueError(\n f\"Invalid value in stored_metadata_rules in configuration file ({cfgpath}): list values must be a dict: got {type(d)}\"\n )\n\n try:\n matcher = d[\"matcher\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(matcher, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a string\"\n )\n\n try:\n actions = d[\"actions\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(actions, list):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings\"\n 
)\n for action in actions:\n if not isinstance(action, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings: got {type(action)}\"\n )\n\n try:\n stored_metadata_rules.append(MetadataRule.parse(matcher, actions))\n except RuleSyntaxError as e:\n raise InvalidConfigValueError(\n f\"Failed to parse stored_metadata_rules in configuration file ({cfgpath}): rule {d}: {e}\"\n ) from e\n if \"stored_metadata_rules\" in data:\n del data[\"stored_metadata_rules\"]\n\n # Get the potential default template before evaluating the rest.\n default_templates = deepcopy(DEFAULT_TEMPLATE_PAIR)\n with contextlib.suppress(KeyError):\n default_templates.release = PathTemplate(data[\"path_templates\"][\"default\"][\"release\"])\n del data[\"path_templates\"][\"default\"][\"release\"]\n with contextlib.suppress(KeyError):\n default_templates.track = PathTemplate(data[\"path_templates\"][\"default\"][\"track\"])\n del data[\"path_templates\"][\"default\"][\"track\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"][\"default\"]:\n del data[\"path_templates\"][\"default\"]\n\n path_templates = PathTemplateConfig.with_defaults(default_templates)\n if tmpl_config := data.get(\"path_templates\", None):\n for key in [\n \"source\",\n \"all_releases\",\n \"new_releases\",\n \"recently_added_releases\",\n \"artists\",\n \"genres\",\n \"labels\",\n \"collages\",\n ]:\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).release = PathTemplate(tmpl_config[key][\"release\"])\n del tmpl_config[key][\"release\"]\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).track = PathTemplate(tmpl_config[key][\"track\"])\n del tmpl_config[key][\"track\"]\n with contextlib.suppress(KeyError):\n if not tmpl_config[key]:\n del tmpl_config[key]\n\n with contextlib.suppress(KeyError):\n path_templates.playlists = PathTemplate(tmpl_config[\"playlists\"])\n del tmpl_config[\"playlists\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"]:\n del data[\"path_templates\"]\n\n try:\n path_templates.parse()\n except InvalidPathTemplateError as e:\n raise InvalidConfigValueError(\n f\"Invalid path template in configuration file ({cfgpath}) for template {e.key}: {e}\"\n ) from e\n\n if data:\n unrecognized_accessors: list[str] = []\n # Do a DFS over the data keys to assemble the map of unknown keys. 
State is a tuple of\n # (\"accessor\", node).\n dfs_state: deque[tuple[str, dict[str, Any]]] = deque([(\"\", data)])\n while dfs_state:\n accessor, node = dfs_state.pop()\n if isinstance(node, dict):\n for k, v in node.items():\n child_accessor = k if not accessor else f\"{accessor}.{k}\"\n dfs_state.append((child_accessor, v))\n continue\n unrecognized_accessors.append(accessor)\n logger.warning(\n f\"Unrecognized options found in configuration file: {', '.join(unrecognized_accessors)}\"\n )\n\n return Config(\n music_source_dir=music_source_dir,\n fuse_mount_dir=fuse_mount_dir,\n cache_dir=cache_dir,\n max_proc=max_proc,\n artist_aliases_map=artist_aliases_map,\n artist_aliases_parents_map=artist_aliases_parents_map,\n fuse_artists_whitelist=fuse_artists_whitelist,\n fuse_genres_whitelist=fuse_genres_whitelist,\n fuse_labels_whitelist=fuse_labels_whitelist,\n fuse_artists_blacklist=fuse_artists_blacklist,\n fuse_genres_blacklist=fuse_genres_blacklist,\n fuse_labels_blacklist=fuse_labels_blacklist,\n cover_art_stems=cover_art_stems,\n valid_art_exts=valid_art_exts,\n path_templates=path_templates,\n rename_source_files=rename_source_files,\n ignore_release_directories=ignore_release_directories,\n stored_metadata_rules=stored_metadata_rules,\n )\n\n @functools.cached_property\n def valid_cover_arts(self) -> list[str]:\n return [s + \".\" + e for s in self.cover_art_stems for e in self.valid_art_exts]\n\n @functools.cached_property\n def cache_database_path(self) -> Path:\n return self.cache_dir / \"cache.sqlite3\"\n\n @functools.cached_property\n def watchdog_pid_path(self) -> Path:\n return self.cache_dir / \"watchdog.pid\"\n\n @functools.cached_property\n def sanitized_artist_aliases_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_map.items()}\n\n @functools.cached_property\n def sanitized_artist_aliases_parents_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_parents_map.items()}" }, { "identifier": "MetadataAction", "path": "rose/rule_parser.py", "snippet": "class MetadataAction:\n # The behavior of the action, along with behavior-specific parameters.\n behavior: ReplaceAction | SedAction | SplitAction | AddAction | DeleteAction\n # The tags to apply the action on. Defaults to the tag that the pattern matched.\n tags: list[Tag]\n # Only apply the action on values that match this pattern. 
None means that all values are acted\n # upon.\n pattern: MatcherPattern | None = None\n\n def __str__(self) -> str:\n r = \"\"\n r += stringify_tags(self.tags)\n if self.pattern:\n r += \":\" + str(self.pattern)\n if r:\n r += \"::\"\n\n if isinstance(self.behavior, ReplaceAction):\n r += \"replace\"\n elif isinstance(self.behavior, SedAction):\n r += \"sed\"\n elif isinstance(self.behavior, SplitAction):\n r += \"split\"\n elif isinstance(self.behavior, AddAction):\n r += \"add\"\n elif isinstance(self.behavior, DeleteAction):\n r += \"delete\"\n\n if isinstance(self.behavior, ReplaceAction):\n r += \":\" + self.behavior.replacement\n elif isinstance(self.behavior, SedAction):\n r += \":\" + str(self.behavior.src.pattern).replace(\":\", r\"\\:\")\n r += \":\"\n r += self.behavior.dst.replace(\":\", r\"\\:\")\n elif isinstance(self.behavior, SplitAction):\n r += \":\" + self.behavior.delimiter\n return r\n\n @classmethod\n def parse(\n cls,\n raw: str,\n action_number: int | None = None,\n # If there is a matcher for the action, pass it here to set the defaults.\n matcher: MetadataMatcher | None = None,\n ) -> MetadataAction:\n idx = 0\n # Common arguments to feed into Syntax Error.\n err = {\"rule\": raw, \"rule_name\": \"action\"}\n if action_number:\n err[\"rule_name\"] += f\" {action_number}\"\n\n # First, determine whether we have a matcher section or not. The matcher section is optional,\n # but present if there is an unescaped `::`.\n _, action_idx = take(raw, \"::\")\n has_tags_pattern_section = action_idx != len(raw)\n\n # Parse the (optional) tags+pattern section.\n if not has_tags_pattern_section:\n if not matcher:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Tags/pattern section not found. \"\n \"Must specify tags to modify, since there is no matcher to default to. \"\n \"Make sure you are formatting your action like {tags}:{pattern}::{kind}:{args} (where `:{pattern}` is optional)\",\n )\n tags: list[Tag] = [x for x in matcher.tags if x in MODIFIABLE_TAGS]\n pattern = matcher.pattern.pattern\n case_insensitive = matcher.pattern.case_insensitive\n else:\n # First, parse the tags. If the tag is matched, keep going, otherwise employ the list\n # parsing logic.\n if raw[idx:].startswith(\"matched:\"):\n if not matcher:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Cannot use `matched` in this context: there is no matcher to default to.\",\n )\n idx += len(\"matched:\")\n tags = [x for x in matcher.tags if x in MODIFIABLE_TAGS]\n pattern = matcher.pattern.pattern\n case_insensitive = matcher.pattern.case_insensitive\n else:\n tags = []\n found_colon = False\n while True:\n for t, resolved in ALL_TAGS.items():\n if not raw[idx:].startswith(t):\n continue\n if raw[idx:][len(t)] not in [\":\", \",\"]:\n continue\n for resolvedtag in resolved:\n if resolvedtag not in MODIFIABLE_TAGS:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Invalid tag: {t} is not modifiable.\",\n )\n tags.append(resolvedtag)\n idx += len(t) + 1\n found_colon = raw[idx - 1] == \":\"\n break\n else:\n tags_to_print: list[str] = []\n for t, resolvedtags in ALL_TAGS.items():\n if all(r in MODIFIABLE_TAGS for r in resolvedtags):\n tags_to_print.append(t)\n feedback = f\"Invalid tag: must be one of {{{', '.join(tags_to_print)}}}. The next character after a tag must be ':' or ','.\"\n if matcher:\n feedback = f\"Invalid tag: must be one of matched, {{{', '.join(tags_to_print)}}}. (And if the value is matched, it must be alone.) 
The next character after a tag must be ':' or ','.\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n if found_colon:\n break\n\n # And now parse the optional pattern. If the next character is a `::`, then we have an\n # explicitly empty pattern, after which we reach the end of the tags+pattern section.\n pattern = None\n case_insensitive = False\n if raw[idx : idx + 2] == \"::\":\n idx += 2\n # Otherwise, if we hit a lone `:`, we've hit the end of the tags+pattern section, but\n # the pattern is not specified. In this case, default to the matcher's pattern, if we\n # have a matcher.\n # hit the end of the matcher, and we should proceed to the action.\n elif raw[idx] == \":\":\n idx += 1\n if matcher and tags == matcher.tags:\n pattern = matcher.pattern.pattern\n # And otherwise, parse the pattern!\n else:\n pattern, fwd = take(raw[idx:], \":\")\n idx += fwd\n # Set an empty pattern to null.\n pattern = pattern or None\n\n # If we don't see the second colon here, that means we are looking at\n # single-character flags. Only check this if pattern is not null though.\n if pattern and raw[idx : idx + 1] != \":\":\n flags, fwd = take(raw[idx:], \":\")\n if not flags:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"No flags specified: Please remove this section (by deleting the colon) or specify one of the supported flags: `i` (case insensitive).\",\n )\n for i, flag in enumerate(flags):\n if flag == \"i\":\n case_insensitive = True\n continue\n raise RuleSyntaxError(\n **err,\n index=idx + i,\n feedback=\"Unrecognized flag: Either you forgot a colon here (to end the matcher), or this is an invalid matcher flag. The only supported flag is `i` (case insensitive).\",\n )\n idx += fwd\n # Skip the second colon. Now we're at the start of the action.\n idx += 1\n\n # Then let's start parsing the action!\n action_kind, fwd = take(raw[idx:], \":\")\n valid_actions = [\n \"replace\",\n \"sed\",\n \"split\",\n \"add\",\n \"delete\",\n ]\n if action_kind not in valid_actions:\n feedback = f\"Invalid action kind: must be one of {{{', '.join(valid_actions)}}}.\"\n if idx == 0 and \":\" in raw:\n feedback += \" If this is pointing at your pattern, you forgot to put :: (double colons) between the matcher section and the action section.\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n idx += fwd\n\n # Validate that the action type is supported for the given tags.\n if action_kind == \"split\" or action_kind == \"add\":\n single_valued_tags = [t for t in tags if t in SINGLE_VALUE_TAGS]\n if single_valued_tags:\n raise InvalidRuleError(\n f\"Single valued tags {', '.join(single_valued_tags)} cannot be modified by multi-value action {action_kind}\"\n )\n\n # And then parse each action kind separately.\n behavior: ReplaceAction | SedAction | SplitAction | AddAction | DeleteAction\n if action_kind == \"replace\":\n replacement, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if replacement == \"\":\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Replacement not found: must specify a non-empty replacement. Use the delete action to remove a value.\",\n )\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the replacement, but the replacement must be the last section. 
Perhaps you meant to escape this colon?\",\n )\n behavior = ReplaceAction(replacement=replacement)\n elif action_kind == \"sed\":\n src_str, fwd = take(raw[idx:], \":\", including=False)\n if src_str == \"\":\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Empty sed pattern found: must specify a non-empty pattern. Example: {raw}:pattern:replacement\",\n )\n try:\n src = re.compile(src_str)\n except re.error as e:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Failed to compile the sed pattern regex: invalid pattern: {e}\",\n ) from e\n idx += fwd\n\n if len(raw) == idx or raw[idx] != \":\":\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Sed replacement not found: must specify a sed replacement section. Example: {raw}:replacement.\",\n )\n idx += 1\n\n dst, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the sed replacement, but the sed replacement must be the last section. Perhaps you meant to escape this colon?\",\n )\n behavior = SedAction(src=src, dst=dst)\n elif action_kind == \"split\":\n delimiter, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if delimiter == \"\":\n feedback = \"Delimiter not found: must specify a non-empty delimiter to split on.\"\n if len(raw) > idx and raw[idx] == \":\":\n feedback += \" Perhaps you meant to escape this colon?\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the delimiter, but the delimiter must be the last section. Perhaps you meant to escape this colon?\",\n )\n behavior = SplitAction(delimiter=delimiter)\n elif action_kind == \"add\":\n value, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if value == \"\":\n feedback = \"Value not found: must specify a non-empty value to add.\"\n if len(raw) > idx and raw[idx] == \":\":\n feedback += \" Perhaps you meant to escape this colon?\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the value, but the value must be the last section. Perhaps you meant to escape this colon?\",\n )\n behavior = AddAction(value=value)\n elif action_kind == \"delete\":\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the action kind, but the delete action has no parameters. Please remove this section.\",\n )\n behavior = DeleteAction()\n else: # pragma: no cover\n raise RoseError(f\"Impossible: unknown action_kind {action_kind=}\")\n\n action = MetadataAction(\n behavior=behavior,\n tags=tags,\n pattern=MatcherPattern(pattern=pattern, case_insensitive=case_insensitive)\n if pattern\n else None,\n )\n logger.debug(f\"Parsed rule action {raw=} {matcher=} as {action=}\")\n return action" }, { "identifier": "MetadataMatcher", "path": "rose/rule_parser.py", "snippet": "class MetadataMatcher:\n # Tags to test against the pattern. 
If any tags match the pattern, the action will be ran\n # against the track.\n tags: list[Tag]\n # The pattern to test the tag against.\n pattern: MatcherPattern\n\n def __str__(self) -> str:\n r = stringify_tags(self.tags)\n r += \":\"\n r += str(self.pattern)\n return r\n\n @classmethod\n def parse(cls, raw: str) -> MetadataMatcher:\n idx = 0\n # Common arguments to feed into Syntax Error.\n err = {\"rule_name\": \"matcher\", \"rule\": raw}\n\n # First, parse the tags.\n tags: list[Tag] = []\n found_colon = False\n while True:\n for t, resolved in ALL_TAGS.items():\n if not raw[idx:].startswith(t):\n continue\n try:\n if raw[idx:][len(t)] not in [\":\", \",\"]:\n continue\n except IndexError:\n raise RuleSyntaxError(\n **err,\n index=idx + len(t),\n feedback=\"Expected to find ',' or ':', found end of string.\",\n ) from None\n tags.extend(resolved)\n idx += len(t) + 1\n found_colon = raw[idx - 1] == \":\"\n break\n else:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Invalid tag: must be one of {{{', '.join(ALL_TAGS)}}}. The next character after a tag must be ':' or ','.\",\n )\n if found_colon:\n break\n\n # Then parse the pattern.\n pattern, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n\n # If more input is remaining, it should be optional single-character flags.\n case_insensitive = False\n if idx < len(raw) and raw[idx] == \":\":\n idx += 1\n flags, fwd = take(raw[idx:], \":\")\n if not flags:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"No flags specified: Please remove this section (by deleting the colon) or specify one of the supported flags: `i` (case insensitive).\",\n )\n for i, flag in enumerate(flags):\n if flag == \"i\":\n case_insensitive = True\n continue\n raise RuleSyntaxError(\n **err,\n index=idx + i,\n feedback=\"Unrecognized flag: Please specify one of the supported flags: `i` (case insensitive).\",\n )\n idx += fwd\n\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Extra input found after end of matcher. 
Perhaps you meant to escape this colon?\",\n )\n\n matcher = MetadataMatcher(\n tags=tags,\n pattern=MatcherPattern(pattern=pattern, case_insensitive=case_insensitive),\n )\n logger.debug(f\"Parsed rule matcher {raw=} as {matcher=}\")\n return matcher" }, { "identifier": "dump_track", "path": "rose/tracks.py", "snippet": "def dump_track(c: Config, track_id: str) -> str:\n track = get_track(c, track_id)\n if track is None:\n raise TrackDoesNotExistError(f\"Track {track_id} does not exist\")\n return json.dumps(track.dump())" }, { "identifier": "dump_tracks", "path": "rose/tracks.py", "snippet": "def dump_tracks(c: Config, matcher: MetadataMatcher | None = None) -> str:\n track_ids = None\n if matcher:\n track_ids = [t.id for t in fast_search_for_matching_tracks(c, matcher)]\n tracks = list_tracks(c, track_ids)\n if matcher:\n tracks = filter_track_false_positives_using_read_cache(matcher, tracks)\n return json.dumps([t.dump() for t in tracks])" }, { "identifier": "run_actions_on_track", "path": "rose/tracks.py", "snippet": "def run_actions_on_track(\n c: Config,\n track_id: str,\n actions: list[MetadataAction],\n *,\n dry_run: bool = False,\n confirm_yes: bool = False,\n) -> None:\n \"\"\"Run rule engine actions on a release.\"\"\"\n track = get_track(c, track_id)\n if track is None:\n raise TrackDoesNotExistError(f\"Track {track_id} does not exist\")\n audiotag = AudioTags.from_file(track.source_path)\n execute_metadata_actions(c, actions, [audiotag], dry_run=dry_run, confirm_yes=confirm_yes)" } ]
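The configuration loader excerpted earlier in this record's context treats music_source_dir and fuse_mount_dir as required and everything else (cache_dir, max_proc, artist_aliases, the fuse whitelists/blacklists, cover_art_stems, valid_art_exts, rename_source_files, ignore_release_directories, path_templates, stored_metadata_rules) as optional, warning about any keys it does not recognize. Below is a minimal sketch of a TOML document that satisfies the required keys; it is checked only with the standard-library tomllib (Python 3.11+), not against the loader itself, and the stored rule strings are illustrative uses of the matcher/action syntax parsed in the snippets above rather than values taken from a real config.

import tomllib  # standard library on Python 3.11+

# Minimal config shaped like the loader above expects; only the first two keys are required.
EXAMPLE_CONFIG = """
music_source_dir = "~/music"
fuse_mount_dir = "~/mnt/rose"

[[stored_metadata_rules]]
matcher = "tracktitle:Bop"
actions = ["tracktitle::replace:Boop"]
"""

data = tomllib.loads(EXAMPLE_CONFIG)
assert "music_source_dir" in data and "fuse_mount_dir" in data
print(data["stored_metadata_rules"][0]["matcher"])  # tracktitle:Bop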
import json
import pytest
from pathlib import Path
from rose.audiotags import AudioTags
from rose.config import Config
from rose.rule_parser import MetadataAction, MetadataMatcher
from rose.tracks import dump_track, dump_tracks, run_actions_on_track
12,348
def test_run_action_on_track(config: Config, source_dir: Path) -> None:
    action = MetadataAction.parse("tracktitle::replace:Bop")
    af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a")
    assert af.id is not None
def test_run_action_on_track(config: Config, source_dir: Path) -> None:
    action = MetadataAction.parse("tracktitle::replace:Bop")
    af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a")
    assert af.id is not None
run_actions_on_track(config, af.id, [action])
6
2023-10-09 14:42:23+00:00
16k
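Taken together, the row above shows how these samples are meant to be consumed: the cropped file ends right before the line to be predicted, the gold next line is stored separately, and the context list carries the cross-file definitions a model would need to resolve the call (with gold_snippet_index presumably marking the snippet that actually contains the needed symbol). The sketch below is one plausible way to run an exact-match next-line evaluation over such rows; the dict field names mirror this preview and the `complete` callable is a stand-in for any completion backend, so treat both as assumptions rather than an official loader.

from typing import Callable


def build_prompt(row: dict) -> str:
    # Concatenate cross-file snippets, the file's imports, and the cropped file body.
    snippets = "\n\n".join(c["snippet"] for c in row.get("context", []))
    return f"{snippets}\n\n{row['import_statement']}\n\n{row['cropped_code']}\n"


def next_line_exact_match(row: dict, complete: Callable[[str], str]) -> bool:
    # Compare the first line of the stripped completion against the gold next line.
    lines = complete(build_prompt(row)).strip().splitlines()
    prediction = lines[0].strip() if lines else ""
    return prediction == row["next_line"].strip()


if __name__ == "__main__":
    # Tiny stand-in row assembled from the short fields of the record above.
    row = {
        "context": [],
        "import_statement": "from rose.tracks import run_actions_on_track",
        "cropped_code": "assert af.id is not None",
        "next_line": "run_actions_on_track(config, af.id, [action])",
    }

    def oracle(prompt: str) -> str:
        # Fake "model" that returns the gold line, just to exercise the metric.
        return "run_actions_on_track(config, af.id, [action])"

    print(next_line_exact_match(row, oracle))  # True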
grainseed/monitask
sam/segment_anything/build_sam.py
[ { "identifier": "Sam", "path": "sam/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\n mask_decoder: MaskDecoder,\r\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\r\n pixel_std: List[float] = [58.395, 57.12, 57.375],\r\n ) -> None:\r\n \"\"\"\r\n SAM predicts object masks from an image and input prompts.\r\n\r\n Arguments:\r\n image_encoder (ImageEncoderViT): The backbone used to encode the\r\n image into image embeddings that allow for efficient mask prediction.\r\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\r\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\r\n and encoded prompts.\r\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\r\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\r\n \"\"\"\r\n super().__init__()\r\n self.image_encoder = image_encoder\r\n self.prompt_encoder = prompt_encoder\r\n self.mask_decoder = mask_decoder\r\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self) -> Any:\r\n return self.pixel_mean.device\r\n\r\n def forward(\r\n self,\r\n batched_input: List[Dict[str, Any]],\r\n multimask_output: bool,\r\n hq_token_only: bool =False,\r\n ) -> List[Dict[str, torch.Tensor]]:\r\n \"\"\"\r\n Predicts masks end-to-end from provided images and prompts.\r\n If prompts are not known in advance, using SamPredictor is\r\n recommended over calling the model directly.\r\n\r\n Arguments:\r\n batched_input (list(dict)): A list over input images, each a\r\n dictionary with the following keys. A prompt key can be\r\n excluded if it is not present.\r\n 'image': The image as a torch tensor in 3xHxW format,\r\n already transformed for input to the model.\r\n 'original_size': (tuple(int, int)) The original size of\r\n the image before transformation, as (H, W).\r\n 'point_coords': (torch.Tensor) Batched point prompts for\r\n this image, with shape BxNx2. Already transformed to the\r\n input frame of the model.\r\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\r\n with shape BxN.\r\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\r\n Already transformed to the input frame of the model.\r\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\r\n in the form Bx1xHxW.\r\n multimask_output (bool): Whether the model should predict multiple\r\n disambiguating masks, or return a single mask.\r\n\r\n Returns:\r\n (list(dict)): A list over input images, where each element is\r\n as dictionary with the following keys.\r\n 'masks': (torch.Tensor) Batched binary mask predictions,\r\n with shape BxCxHxW, where B is the number of input prompts,\r\n C is determined by multimask_output, and (H, W) is the\r\n original size of the image.\r\n 'iou_predictions': (torch.Tensor) The model's predictions\r\n of mask quality, in shape BxC.\r\n 'low_res_logits': (torch.Tensor) Low resolution logits with\r\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\r\n to subsequent iterations of prediction.\r\n \"\"\"\r\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\r\n image_embeddings, interm_embeddings = self.image_encoder(input_images)\r\n interm_embeddings = interm_embeddings[0] # early layer\r\n\r\n outputs = []\r\n for image_record, curr_embedding, curr_interm in zip(batched_input, image_embeddings, interm_embeddings):\r\n if \"point_coords\" in image_record:\r\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\r\n else:\r\n points = None\r\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\r\n points=points,\r\n boxes=image_record.get(\"boxes\", None),\r\n masks=image_record.get(\"mask_inputs\", None),\r\n )\r\n low_res_masks, iou_predictions = self.mask_decoder(\r\n image_embeddings=curr_embedding.unsqueeze(0),\r\n image_pe=self.prompt_encoder.get_dense_pe(),\r\n sparse_prompt_embeddings=sparse_embeddings,\r\n dense_prompt_embeddings=dense_embeddings,\r\n multimask_output=multimask_output,\r\n hq_token_only=hq_token_only,\r\n interm_embeddings=curr_interm.unsqueeze(0).unsqueeze(0),\r\n )\r\n masks = self.postprocess_masks(\r\n low_res_masks,\r\n input_size=image_record[\"image\"].shape[-2:],\r\n original_size=image_record[\"original_size\"],\r\n )\r\n masks = masks > self.mask_threshold\r\n outputs.append(\r\n {\r\n \"masks\": masks,\r\n \"iou_predictions\": iou_predictions,\r\n \"low_res_logits\": low_res_masks,\r\n }\r\n )\r\n return outputs\r\n\r\n def postprocess_masks(\r\n self,\r\n masks: torch.Tensor,\r\n input_size: Tuple[int, ...],\r\n original_size: Tuple[int, ...],\r\n ) -> torch.Tensor:\r\n \"\"\"\r\n Remove padding and upscale masks to the original image size.\r\n\r\n Arguments:\r\n masks (torch.Tensor): Batched masks from the mask_decoder,\r\n in BxCxHxW format.\r\n input_size (tuple(int, int)): The size of the image input to the\r\n model, in (H, W) format. 
Used to remove padding.\r\n original_size (tuple(int, int)): The original size of the image\r\n before resizing for input to the model, in (H, W) format.\r\n\r\n Returns:\r\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\r\n is given by original_size.\r\n \"\"\"\r\n masks = F.interpolate(\r\n masks,\r\n (self.image_encoder.img_size, self.image_encoder.img_size),\r\n mode=\"bilinear\",\r\n align_corners=False,\r\n )\r\n masks = masks[..., : input_size[0], : input_size[1]]\r\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\r\n return masks\r\n\r\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\r\n # Normalize colors\r\n x = (x - self.pixel_mean) / self.pixel_std\r\n\r\n # Pad\r\n h, w = x.shape[-2:]\r\n padh = self.image_encoder.img_size - h\r\n padw = self.image_encoder.img_size - w\r\n x = F.pad(x, (0, padw, 0, padh))\r\n return x\r" }, { "identifier": "ImageEncoderViT", "path": "sam/segment_anything/modeling/image_encoder.py", "snippet": "class ImageEncoderViT(nn.Module):\r\n def __init__(\r\n self,\r\n img_size: int = 1024,\r\n patch_size: int = 16,\r\n in_chans: int = 3,\r\n embed_dim: int = 768,\r\n depth: int = 12,\r\n num_heads: int = 12,\r\n mlp_ratio: float = 4.0,\r\n out_chans: int = 256,\r\n qkv_bias: bool = True,\r\n norm_layer: Type[nn.Module] = nn.LayerNorm,\r\n act_layer: Type[nn.Module] = nn.GELU,\r\n use_abs_pos: bool = True,\r\n use_rel_pos: bool = False,\r\n rel_pos_zero_init: bool = True,\r\n window_size: int = 0,\r\n global_attn_indexes: Tuple[int, ...] = (),\r\n ) -> None:\r\n \"\"\"\r\n Args:\r\n img_size (int): Input image size.\r\n patch_size (int): Patch size.\r\n in_chans (int): Number of input image channels.\r\n embed_dim (int): Patch embedding dimension.\r\n depth (int): Depth of ViT.\r\n num_heads (int): Number of attention heads in each ViT block.\r\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\r\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\r\n norm_layer (nn.Module): Normalization layer.\r\n act_layer (nn.Module): Activation layer.\r\n use_abs_pos (bool): If True, use absolute positional embeddings.\r\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\r\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\r\n window_size (int): Window size for window attention blocks.\r\n global_attn_indexes (list): Indexes for blocks using global attention.\r\n \"\"\"\r\n super().__init__()\r\n self.img_size = img_size\r\n\r\n self.patch_embed = PatchEmbed(\r\n kernel_size=(patch_size, patch_size),\r\n stride=(patch_size, patch_size),\r\n in_chans=in_chans,\r\n embed_dim=embed_dim,\r\n )\r\n\r\n self.pos_embed: Optional[nn.Parameter] = None\r\n if use_abs_pos:\r\n # Initialize absolute positional embedding with pretrain image size.\r\n self.pos_embed = nn.Parameter(\r\n torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\r\n )\r\n\r\n self.blocks = nn.ModuleList()\r\n for i in range(depth):\r\n block = Block(\r\n dim=embed_dim,\r\n num_heads=num_heads,\r\n mlp_ratio=mlp_ratio,\r\n qkv_bias=qkv_bias,\r\n norm_layer=norm_layer,\r\n act_layer=act_layer,\r\n use_rel_pos=use_rel_pos,\r\n rel_pos_zero_init=rel_pos_zero_init,\r\n window_size=window_size if i not in global_attn_indexes else 0,\r\n input_size=(img_size // patch_size, img_size // patch_size),\r\n )\r\n self.blocks.append(block)\r\n\r\n self.neck = 
nn.Sequential(\r\n nn.Conv2d(\r\n embed_dim,\r\n out_chans,\r\n kernel_size=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(out_chans),\r\n nn.Conv2d(\r\n out_chans,\r\n out_chans,\r\n kernel_size=3,\r\n padding=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(out_chans),\r\n )\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n x = self.patch_embed(x)\r\n if self.pos_embed is not None:\r\n x = x + self.pos_embed\r\n\r\n interm_embeddings=[]\r\n for blk in self.blocks:\r\n x = blk(x)\r\n if blk.window_size == 0:\r\n interm_embeddings.append(x)\r\n\r\n x = self.neck(x.permute(0, 3, 1, 2))\r\n\r\n return x, interm_embeddings\r" }, { "identifier": "MaskDecoderHQ", "path": "sam/segment_anything/modeling/mask_decoder_hq.py", "snippet": "class MaskDecoderHQ(nn.Module):\r\n def __init__(\r\n self,\r\n *,\r\n transformer_dim: int,\r\n transformer: nn.Module,\r\n num_multimask_outputs: int = 3,\r\n activation: Type[nn.Module] = nn.GELU,\r\n iou_head_depth: int = 3,\r\n iou_head_hidden_dim: int = 256,\r\n vit_dim: int = 1024,\r\n ) -> None:\r\n \"\"\"\r\n Predicts masks given an image and prompt embeddings, using a\r\n transformer architecture.\r\n\r\n Arguments:\r\n transformer_dim (int): the channel dimension of the transformer\r\n transformer (nn.Module): the transformer used to predict masks\r\n num_multimask_outputs (int): the number of masks to predict\r\n when disambiguating masks\r\n activation (nn.Module): the type of activation to use when\r\n upscaling masks\r\n iou_head_depth (int): the depth of the MLP used to predict\r\n mask quality\r\n iou_head_hidden_dim (int): the hidden dimension of the MLP\r\n used to predict mask quality\r\n \"\"\"\r\n super().__init__()\r\n self.transformer_dim = transformer_dim\r\n self.transformer = transformer\r\n\r\n self.num_multimask_outputs = num_multimask_outputs\r\n\r\n self.iou_token = nn.Embedding(1, transformer_dim)\r\n self.num_mask_tokens = num_multimask_outputs + 1\r\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\r\n\r\n self.output_upscaling = nn.Sequential(\r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim // 4),\r\n activation(),\r\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\r\n activation(),\r\n )\r\n self.output_hypernetworks_mlps = nn.ModuleList(\r\n [\r\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\r\n for i in range(self.num_mask_tokens)\r\n ]\r\n )\r\n\r\n self.iou_prediction_head = MLP(\r\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\r\n )\r\n\r\n # HQ-SAM parameters\r\n self.hf_token = nn.Embedding(1, transformer_dim) # HQ-Ouptput-Token\r\n self.hf_mlp = MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) # corresponding new MLP layer for HQ-Ouptput-Token\r\n self.num_mask_tokens = self.num_mask_tokens + 1\r\n \r\n # three conv fusion layers for obtaining HQ-Feature\r\n self.compress_vit_feat = nn.Sequential(\r\n nn.ConvTranspose2d(vit_dim, transformer_dim, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim),\r\n nn.GELU(), \r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 8, kernel_size=2, stride=2))\r\n \r\n self.embedding_encoder = nn.Sequential(\r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim // 4),\r\n nn.GELU(),\r\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\r\n )\r\n self.embedding_maskfeature = 
nn.Sequential(\r\n nn.Conv2d(transformer_dim // 8, transformer_dim // 4, 3, 1, 1), \r\n LayerNorm2d(transformer_dim // 4),\r\n nn.GELU(),\r\n nn.Conv2d(transformer_dim // 4, transformer_dim // 8, 3, 1, 1))\r\n\r\n\r\n\r\n def forward(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n multimask_output: bool,\r\n hq_token_only: bool,\r\n interm_embeddings: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Predict masks given image and prompt embeddings.\r\n\r\n Arguments:\r\n image_embeddings (torch.Tensor): the embeddings from the ViT image encoder\r\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\r\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\r\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\r\n multimask_output (bool): Whether to return multiple masks or a single\r\n mask.\r\n\r\n Returns:\r\n torch.Tensor: batched predicted masks\r\n torch.Tensor: batched predictions of mask quality\r\n \"\"\"\r\n vit_features = interm_embeddings[0].permute(0, 3, 1, 2) # early-layer ViT feature, after 1st global attention block in ViT\r\n hq_features = self.embedding_encoder(image_embeddings) + self.compress_vit_feat(vit_features)\r\n\r\n masks, iou_pred = self.predict_masks(\r\n image_embeddings=image_embeddings,\r\n image_pe=image_pe,\r\n sparse_prompt_embeddings=sparse_prompt_embeddings,\r\n dense_prompt_embeddings=dense_prompt_embeddings,\r\n hq_features=hq_features,\r\n )\r\n\r\n # Select the correct mask or masks for output\r\n if multimask_output:\r\n # mask with highest score\r\n mask_slice = slice(1,self.num_mask_tokens-1)\r\n iou_pred = iou_pred[:, mask_slice]\r\n iou_pred, max_iou_idx = torch.max(iou_pred,dim=1)\r\n iou_pred = iou_pred.unsqueeze(1)\r\n masks_multi = masks[:, mask_slice, :, :]\r\n masks_sam = masks_multi[torch.arange(masks_multi.size(0)),max_iou_idx].unsqueeze(1)\r\n else:\r\n # singale mask output, default\r\n mask_slice = slice(0, 1)\r\n iou_pred = iou_pred[:,mask_slice]\r\n masks_sam = masks[:,mask_slice]\r\n\r\n masks_hq = masks[:,slice(self.num_mask_tokens-1, self.num_mask_tokens)]\r\n if hq_token_only:\r\n masks = masks_hq\r\n else:\r\n masks = masks_sam + masks_hq\r\n # Prepare output\r\n return masks, iou_pred\r\n\r\n def predict_masks(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n hq_features: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\r\n # Concatenate output tokens\r\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hf_token.weight], dim=0)\r\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\r\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\r\n\r\n # Expand per-image data in batch direction to be per-mask\r\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\r\n src = src + dense_prompt_embeddings\r\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\r\n b, c, h, w = src.shape\r\n\r\n # Run the transformer\r\n hs, src = self.transformer(src, pos_src, tokens)\r\n iou_token_out = hs[:, 0, :]\r\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\r\n\r\n # Upscale mask embeddings and predict masks using the mask tokens\r\n src = src.transpose(1, 2).view(b, c, h, w)\r\n\r\n upscaled_embedding_sam = self.output_upscaling(src)\r\n upscaled_embedding_hq = self.embedding_maskfeature(upscaled_embedding_sam) + hq_features.repeat(b,1,1,1)\r\n\r\n hyper_in_list: List[torch.Tensor] = []\r\n for i in range(self.num_mask_tokens):\r\n if i < self.num_mask_tokens - 1:\r\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\r\n else:\r\n hyper_in_list.append(self.hf_mlp(mask_tokens_out[:, i, :]))\r\n\r\n hyper_in = torch.stack(hyper_in_list, dim=1)\r\n b, c, h, w = upscaled_embedding_sam.shape\r\n\r\n masks_sam = (hyper_in[:,:self.num_mask_tokens-1] @ upscaled_embedding_sam.view(b, c, h * w)).view(b, -1, h, w)\r\n masks_sam_hq = (hyper_in[:,self.num_mask_tokens-1:] @ upscaled_embedding_hq.view(b, c, h * w)).view(b, -1, h, w)\r\n masks = torch.cat([masks_sam,masks_sam_hq],dim=1)\r\n # Generate mask quality predictions\r\n iou_pred = self.iou_prediction_head(iou_token_out)\r\n\r\n return masks, iou_pred\r" }, { "identifier": "MaskDecoder", "path": "sam/segment_anything/modeling/mask_decoder.py", "snippet": "class MaskDecoder(nn.Module):\r\n def __init__(\r\n self,\r\n *,\r\n transformer_dim: int,\r\n transformer: nn.Module,\r\n num_multimask_outputs: int = 3,\r\n activation: Type[nn.Module] = nn.GELU,\r\n iou_head_depth: int = 3,\r\n iou_head_hidden_dim: int = 256,\r\n ) -> None:\r\n \"\"\"\r\n Predicts masks given an image and prompt embeddings, using a\r\n transformer architecture.\r\n\r\n Arguments:\r\n transformer_dim (int): the channel dimension of the transformer\r\n transformer (nn.Module): the transformer used to predict masks\r\n num_multimask_outputs (int): the number of masks to predict\r\n when disambiguating masks\r\n activation (nn.Module): the type of activation to use when\r\n upscaling masks\r\n iou_head_depth (int): the depth of the MLP used to predict\r\n mask quality\r\n iou_head_hidden_dim (int): the hidden dimension of the MLP\r\n used to predict mask quality\r\n \"\"\"\r\n super().__init__()\r\n self.transformer_dim = transformer_dim\r\n self.transformer = transformer\r\n\r\n self.num_multimask_outputs = num_multimask_outputs\r\n\r\n self.iou_token = nn.Embedding(1, transformer_dim)\r\n self.num_mask_tokens = num_multimask_outputs + 1\r\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\r\n\r\n self.output_upscaling = nn.Sequential(\r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim // 4),\r\n activation(),\r\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\r\n 
activation(),\r\n )\r\n self.output_hypernetworks_mlps = nn.ModuleList(\r\n [\r\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\r\n for i in range(self.num_mask_tokens)\r\n ]\r\n )\r\n\r\n self.iou_prediction_head = MLP(\r\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\r\n )\r\n\r\n def forward(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n multimask_output: bool,\r\n hq_token_only: bool,\r\n interm_embeddings: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Predict masks given image and prompt embeddings.\r\n\r\n Arguments:\r\n image_embeddings (torch.Tensor): the embeddings from the image encoder\r\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\r\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\r\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\r\n multimask_output (bool): Whether to return multiple masks or a single\r\n mask.\r\n\r\n Returns:\r\n torch.Tensor: batched predicted masks\r\n torch.Tensor: batched predictions of mask quality\r\n \"\"\"\r\n masks, iou_pred = self.predict_masks(\r\n image_embeddings=image_embeddings,\r\n image_pe=image_pe,\r\n sparse_prompt_embeddings=sparse_prompt_embeddings,\r\n dense_prompt_embeddings=dense_prompt_embeddings,\r\n )\r\n\r\n # Select the correct mask or masks for output\r\n if multimask_output:\r\n mask_slice = slice(1, None)\r\n else:\r\n mask_slice = slice(0, 1)\r\n masks = masks[:, mask_slice, :, :]\r\n iou_pred = iou_pred[:, mask_slice]\r\n\r\n # Prepare output\r\n return masks, iou_pred\r\n\r\n def predict_masks(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\r\n # Concatenate output tokens\r\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\r\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\r\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\r\n\r\n # Expand per-image data in batch direction to be per-mask\r\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\r\n src = src + dense_prompt_embeddings\r\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\r\n b, c, h, w = src.shape\r\n\r\n # Run the transformer\r\n hs, src = self.transformer(src, pos_src, tokens)\r\n iou_token_out = hs[:, 0, :]\r\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\r\n\r\n # Upscale mask embeddings and predict masks using the mask tokens\r\n src = src.transpose(1, 2).view(b, c, h, w)\r\n upscaled_embedding = self.output_upscaling(src)\r\n hyper_in_list: List[torch.Tensor] = []\r\n for i in range(self.num_mask_tokens):\r\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\r\n hyper_in = torch.stack(hyper_in_list, dim=1)\r\n b, c, h, w = upscaled_embedding.shape\r\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)\r\n\r\n # Generate mask quality predictions\r\n iou_pred = self.iou_prediction_head(iou_token_out)\r\n\r\n return masks, iou_pred\r" }, { "identifier": "PromptEncoder", "path": "sam/segment_anything/modeling/prompt_encoder.py", "snippet": "class PromptEncoder(nn.Module):\r\n def __init__(\r\n self,\r\n embed_dim: int,\r\n image_embedding_size: Tuple[int, int],\r\n input_image_size: Tuple[int, int],\r\n mask_in_chans: int,\r\n activation: Type[nn.Module] = nn.GELU,\r\n ) -> None:\r\n \"\"\"\r\n Encodes prompts for input to SAM's mask decoder.\r\n\r\n Arguments:\r\n embed_dim (int): The prompts' embedding dimension\r\n image_embedding_size (tuple(int, int)): The spatial size of the\r\n image embedding, as (H, W).\r\n input_image_size (int): The padded size of the image as input\r\n to the image encoder, as (H, W).\r\n mask_in_chans (int): The number of hidden channels used for\r\n encoding input masks.\r\n activation (nn.Module): The activation to use when encoding\r\n input masks.\r\n \"\"\"\r\n super().__init__()\r\n self.embed_dim = embed_dim\r\n self.input_image_size = input_image_size\r\n self.image_embedding_size = image_embedding_size\r\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\r\n\r\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\r\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\r\n self.point_embeddings = nn.ModuleList(point_embeddings)\r\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\r\n\r\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\r\n self.mask_downscaling = nn.Sequential(\r\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(mask_in_chans // 4),\r\n activation(),\r\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\r\n LayerNorm2d(mask_in_chans),\r\n activation(),\r\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\r\n )\r\n self.no_mask_embed = nn.Embedding(1, embed_dim)\r\n\r\n def get_dense_pe(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns the positional encoding used to encode point prompts,\r\n applied to a dense set of points the shape of the image encoding.\r\n\r\n Returns:\r\n torch.Tensor: Positional encoding with 
shape\r\n 1x(embed_dim)x(embedding_h)x(embedding_w)\r\n \"\"\"\r\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\r\n\r\n def _embed_points(\r\n self,\r\n points: torch.Tensor,\r\n labels: torch.Tensor,\r\n pad: bool,\r\n ) -> torch.Tensor:\r\n \"\"\"Embeds point prompts.\"\"\"\r\n points = points + 0.5 # Shift to center of pixel\r\n if pad:\r\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\r\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\r\n points = torch.cat([points, padding_point], dim=1)\r\n labels = torch.cat([labels, padding_label], dim=1)\r\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\r\n point_embedding[labels == -1] = 0.0\r\n point_embedding[labels == -1] += self.not_a_point_embed.weight\r\n point_embedding[labels == 0] += self.point_embeddings[0].weight\r\n point_embedding[labels == 1] += self.point_embeddings[1].weight\r\n return point_embedding\r\n\r\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Embeds box prompts.\"\"\"\r\n boxes = boxes + 0.5 # Shift to center of pixel\r\n coords = boxes.reshape(-1, 2, 2)\r\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\r\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\r\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\r\n return corner_embedding\r\n\r\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Embeds mask inputs.\"\"\"\r\n mask_embedding = self.mask_downscaling(masks)\r\n return mask_embedding\r\n\r\n def _get_batch_size(\r\n self,\r\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\r\n boxes: Optional[torch.Tensor],\r\n masks: Optional[torch.Tensor],\r\n ) -> int:\r\n \"\"\"\r\n Gets the batch size of the output given the batch size of the input prompts.\r\n \"\"\"\r\n if points is not None:\r\n return points[0].shape[0]\r\n elif boxes is not None:\r\n return boxes.shape[0]\r\n elif masks is not None:\r\n return masks.shape[0]\r\n else:\r\n return 1\r\n\r\n def _get_device(self) -> torch.device:\r\n return self.point_embeddings[0].weight.device\r\n\r\n def forward(\r\n self,\r\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\r\n boxes: Optional[torch.Tensor],\r\n masks: Optional[torch.Tensor],\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Embeds different types of prompts, returning both sparse and dense\r\n embeddings.\r\n\r\n Arguments:\r\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\r\n and labels to embed.\r\n boxes (torch.Tensor or none): boxes to embed\r\n masks (torch.Tensor or none): masks to embed\r\n\r\n Returns:\r\n torch.Tensor: sparse embeddings for the points and boxes, with shape\r\n BxNx(embed_dim), where N is determined by the number of input points\r\n and boxes.\r\n torch.Tensor: dense embeddings for the masks, in the shape\r\n Bx(embed_dim)x(embed_H)x(embed_W)\r\n \"\"\"\r\n bs = self._get_batch_size(points, boxes, masks)\r\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\r\n if points is not None:\r\n coords, labels = points\r\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\r\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\r\n if boxes is not None:\r\n box_embeddings = self._embed_boxes(boxes)\r\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\r\n\r\n if masks is not None:\r\n dense_embeddings = 
self._embed_masks(masks)\r\n else:\r\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\r\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\r\n )\r\n\r\n return sparse_embeddings, dense_embeddings\r" }, { "identifier": "TwoWayTransformer", "path": "sam/segment_anything/modeling/transformer.py", "snippet": "class TwoWayTransformer(nn.Module):\r\n def __init__(\r\n self,\r\n depth: int,\r\n embedding_dim: int,\r\n num_heads: int,\r\n mlp_dim: int,\r\n activation: Type[nn.Module] = nn.ReLU,\r\n attention_downsample_rate: int = 2,\r\n ) -> None:\r\n \"\"\"\r\n A transformer decoder that attends to an input image using\r\n queries whose positional embedding is supplied.\r\n\r\n Args:\r\n depth (int): number of layers in the transformer\r\n embedding_dim (int): the channel dimension for the input embeddings\r\n num_heads (int): the number of heads for multihead attention. Must\r\n divide embedding_dim\r\n mlp_dim (int): the channel dimension internal to the MLP block\r\n activation (nn.Module): the activation to use in the MLP block\r\n \"\"\"\r\n super().__init__()\r\n self.depth = depth\r\n self.embedding_dim = embedding_dim\r\n self.num_heads = num_heads\r\n self.mlp_dim = mlp_dim\r\n self.layers = nn.ModuleList()\r\n\r\n for i in range(depth):\r\n self.layers.append(\r\n TwoWayAttentionBlock(\r\n embedding_dim=embedding_dim,\r\n num_heads=num_heads,\r\n mlp_dim=mlp_dim,\r\n activation=activation,\r\n attention_downsample_rate=attention_downsample_rate,\r\n skip_first_layer_pe=(i == 0),\r\n )\r\n )\r\n\r\n self.final_attn_token_to_image = Attention(\r\n embedding_dim, num_heads, downsample_rate=attention_downsample_rate\r\n )\r\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\r\n\r\n def forward(\r\n self,\r\n image_embedding: Tensor,\r\n image_pe: Tensor,\r\n point_embedding: Tensor,\r\n ) -> Tuple[Tensor, Tensor]:\r\n \"\"\"\r\n Args:\r\n image_embedding (torch.Tensor): image to attend to. Should be shape\r\n B x embedding_dim x h x w for any h and w.\r\n image_pe (torch.Tensor): the positional encoding to add to the image. 
Must\r\n have the same shape as image_embedding.\r\n point_embedding (torch.Tensor): the embedding to add to the query points.\r\n Must have shape B x N_points x embedding_dim for any N_points.\r\n\r\n Returns:\r\n torch.Tensor: the processed point_embedding\r\n torch.Tensor: the processed image_embedding\r\n \"\"\"\r\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\r\n bs, c, h, w = image_embedding.shape\r\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\r\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\r\n\r\n # Prepare queries\r\n queries = point_embedding\r\n keys = image_embedding\r\n\r\n # Apply transformer blocks and final layernorm\r\n for layer in self.layers:\r\n queries, keys = layer(\r\n queries=queries,\r\n keys=keys,\r\n query_pe=point_embedding,\r\n key_pe=image_pe,\r\n )\r\n\r\n # Apply the final attention layer from the points to the image\r\n q = queries + point_embedding\r\n k = keys + image_pe\r\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\r\n queries = queries + attn_out\r\n queries = self.norm_final_attn(queries)\r\n\r\n return queries, keys\r" }, { "identifier": "TinyViT", "path": "sam/segment_anything/modeling/tiny_vit_sam.py", "snippet": "class TinyViT(nn.Module):\r\n def __init__(self, img_size=224, in_chans=3, num_classes=1000,\r\n embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],\r\n num_heads=[3, 6, 12, 24],\r\n window_sizes=[7, 7, 14, 7],\r\n mlp_ratio=4.,\r\n drop_rate=0.,\r\n drop_path_rate=0.1,\r\n use_checkpoint=False,\r\n mbconv_expand_ratio=4.0,\r\n local_conv_size=3,\r\n layer_lr_decay=1.0,\r\n ):\r\n super().__init__()\r\n self.img_size=img_size\r\n self.num_classes = num_classes\r\n self.depths = depths\r\n self.num_layers = len(depths)\r\n self.mlp_ratio = mlp_ratio\r\n\r\n activation = nn.GELU\r\n\r\n self.patch_embed = PatchEmbed(in_chans=in_chans,\r\n embed_dim=embed_dims[0],\r\n resolution=img_size,\r\n activation=activation)\r\n\r\n patches_resolution = self.patch_embed.patches_resolution\r\n self.patches_resolution = patches_resolution\r\n\r\n # stochastic depth\r\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate,\r\n sum(depths))] # stochastic depth decay rule\r\n\r\n # build layers\r\n self.layers = nn.ModuleList()\r\n for i_layer in range(self.num_layers):\r\n kwargs = dict(dim=embed_dims[i_layer],\r\n input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),\r\n patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),\r\n # input_resolution=(patches_resolution[0] // (2 ** i_layer),\r\n # patches_resolution[1] // (2 ** i_layer)),\r\n depth=depths[i_layer],\r\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\r\n downsample=PatchMerging if (\r\n i_layer < self.num_layers - 1) else None,\r\n use_checkpoint=use_checkpoint,\r\n out_dim=embed_dims[min(\r\n i_layer + 1, len(embed_dims) - 1)],\r\n activation=activation,\r\n )\r\n if i_layer == 0:\r\n layer = ConvLayer(\r\n conv_expand_ratio=mbconv_expand_ratio,\r\n **kwargs,\r\n )\r\n else:\r\n layer = BasicLayer(\r\n num_heads=num_heads[i_layer],\r\n window_size=window_sizes[i_layer],\r\n mlp_ratio=self.mlp_ratio,\r\n drop=drop_rate,\r\n local_conv_size=local_conv_size,\r\n **kwargs)\r\n self.layers.append(layer)\r\n\r\n # Classifier head\r\n self.norm_head = nn.LayerNorm(embed_dims[-1])\r\n self.head = nn.Linear(\r\n embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()\r\n\r\n # init weights\r\n self.apply(self._init_weights)\r\n 
self.set_layer_lr_decay(layer_lr_decay)\r\n self.neck = nn.Sequential(\r\n nn.Conv2d(\r\n embed_dims[-1],\r\n 256,\r\n kernel_size=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(256),\r\n nn.Conv2d(\r\n 256,\r\n 256,\r\n kernel_size=3,\r\n padding=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(256),\r\n )\r\n def set_layer_lr_decay(self, layer_lr_decay):\r\n decay_rate = layer_lr_decay\r\n\r\n # layers -> blocks (depth)\r\n depth = sum(self.depths)\r\n lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]\r\n #print_log(\"LR SCALES:\", lr_scales)\r\n\r\n def _set_lr_scale(m, scale):\r\n for p in m.parameters():\r\n p.lr_scale = scale\r\n\r\n self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))\r\n i = 0\r\n for layer in self.layers:\r\n for block in layer.blocks:\r\n block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))\r\n i += 1\r\n if layer.downsample is not None:\r\n layer.downsample.apply(\r\n lambda x: _set_lr_scale(x, lr_scales[i - 1]))\r\n assert i == depth\r\n for m in [self.norm_head, self.head]:\r\n m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))\r\n\r\n for k, p in self.named_parameters():\r\n p.param_name = k\r\n\r\n def _check_lr_scale(m):\r\n for p in m.parameters():\r\n assert hasattr(p, 'lr_scale'), p.param_name\r\n\r\n self.apply(_check_lr_scale)\r\n\r\n def _init_weights(self, m):\r\n if isinstance(m, nn.Linear):\r\n trunc_normal_(m.weight, std=.02)\r\n if isinstance(m, nn.Linear) and m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.LayerNorm):\r\n nn.init.constant_(m.bias, 0)\r\n nn.init.constant_(m.weight, 1.0)\r\n\r\n @torch.jit.ignore\r\n def no_weight_decay_keywords(self):\r\n return {'attention_biases'}\r\n\r\n def forward_features(self, x):\r\n # x: (N, C, H, W)\r\n x = self.patch_embed(x)\r\n\r\n x = self.layers[0](x)\r\n start_i = 1\r\n\r\n interm_embeddings=[]\r\n for i in range(start_i, len(self.layers)):\r\n layer = self.layers[i]\r\n x = layer(x)\r\n # print_log('x shape:', x.shape, '---i:', i)\r\n if i == 1:\r\n interm_embeddings.append(x.view(x.shape[0], 64, 64, -1))\r\n\r\n B,_,C=x.size()\r\n x = x.view(B, 64, 64, C)\r\n x=x.permute(0, 3, 1, 2)\r\n x=self.neck(x)\r\n return x, interm_embeddings\r\n\r\n def forward(self, x):\r\n x, interm_embeddings = self.forward_features(x)\r\n #x = self.norm_head(x)\r\n #x = self.head(x)\r\n # print_log('come to here is correct'* 3)\r\n return x, interm_embeddings\r" } ]
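One detail worth calling out from the Sam snippet above is the preprocess/postprocess pairing: images are padded on the right and bottom to the encoder's square input size, and predicted masks are upscaled to that square, cropped back to the unpadded input size, then resized to the original resolution. The following standalone sketch replays that round trip with dummy tensors (only torch is required; the 720x960 image and 256x256 low-res mask are illustrative shapes, not values from this record).

import torch
import torch.nn.functional as F

img_size = 1024          # square input size of the image encoder in the snippets above
h, w = 720, 960          # illustrative original image resolution

x = torch.rand(1, 3, h, w)                                  # normalized image, pre-padding
x = F.pad(x, (0, img_size - w, 0, img_size - h))            # pad right/bottom to 1024x1024

low_res = torch.rand(1, 1, 256, 256)                        # decoder's low-res mask logits
masks = F.interpolate(low_res, (img_size, img_size), mode="bilinear", align_corners=False)
masks = masks[..., :h, :w]                                  # drop the padded region
masks = F.interpolate(masks, (h, w), mode="bilinear", align_corners=False)
print(masks.shape)  # torch.Size([1, 1, 720, 960])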
import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder,MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer, TinyViT
11297
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


def build_sam_vit_h(checkpoint=None,device="cpu"):
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
        checkpoint=checkpoint,
    )


build_sam = build_sam_vit_h


def build_sam_vit_l(checkpoint=None,device="cpu"):
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
        checkpoint=checkpoint,
    )


def build_sam_vit_b(checkpoint=None,device="cpu"):
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
        checkpoint=checkpoint,
    )


def build_sam_vit_t(checkpoint=None,device="cpu"):
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    mobile_sam = Sam(
        image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
            embed_dims=[64, 128, 160, 320],
            depths=[2, 2, 6, 2],
            num_heads=[2, 4, 5, 10],
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.,
            drop_rate=0.,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoderHQ(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
            vit_dim=160,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )

    mobile_sam.eval()
    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            state_dict = torch.load(f, map_location=device)
        info = mobile_sam.load_state_dict(state_dict, strict=False)
        #print(info)
    for n, p in mobile_sam.named_parameters():
        if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n:
            p.requires_grad = False

    return mobile_sam


def build_mobile_sam(checkpoint=None,device="cpu"):
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    mobile_sam = Sam(
        image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
            embed_dims=[64, 128, 160, 320],
            depths=[2, 2, 6, 2],
            num_heads=[2, 4, 5, 10],
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.,
            drop_rate=0.,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


def build_sam_vit_h(checkpoint=None,device="cpu"):
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
        checkpoint=checkpoint,
    )


build_sam = build_sam_vit_h


def build_sam_vit_l(checkpoint=None,device="cpu"):
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
        checkpoint=checkpoint,
    )


def build_sam_vit_b(checkpoint=None,device="cpu"):
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
        checkpoint=checkpoint,
    )


def build_sam_vit_t(checkpoint=None,device="cpu"):
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    mobile_sam = Sam(
        image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
            embed_dims=[64, 128, 160, 320],
            depths=[2, 2, 6, 2],
            num_heads=[2, 4, 5, 10],
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.,
            drop_rate=0.,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoderHQ(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
            vit_dim=160,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )

    mobile_sam.eval()
    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            state_dict = torch.load(f, map_location=device)
        info = mobile_sam.load_state_dict(state_dict, strict=False)
        #print(info)
    for n, p in mobile_sam.named_parameters():
        if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n:
            p.requires_grad = False

    return mobile_sam


def build_mobile_sam(checkpoint=None,device="cpu"):
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    mobile_sam = Sam(
        image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
            embed_dims=[64, 128, 160, 320],
            depths=[2, 2, 6, 2],
            num_heads=[2, 4, 5, 10],
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.,
            drop_rate=0.,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
mask_decoder=MaskDecoder(
3
2023-10-14 13:45:54+00:00
16k
zhaoyizhou1123/mbrcsl
examples/pointmaze/run_combo_maze.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropout_rate: Optional[float] = None,\n init_last: bool = False\n ) -> None:\n super().__init__()\n hidden_dims = [input_dim] + list(hidden_dims)\n model = []\n for in_dim, out_dim in zip(hidden_dims[:-1], hidden_dims[1:]):\n model += [nn.Linear(in_dim, out_dim), activation()]\n if dropout_rate is not None:\n model += [nn.Dropout(p=dropout_rate)]\n\n self.output_dim = hidden_dims[-1]\n if output_dim is not None:\n last_layer = nn.Linear(hidden_dims[-1], output_dim)\n if init_last:\n nn.init.xavier_uniform_(last_layer.weight, gain=1e-2)\n nn.init.constant_(last_layer.bias, 0.0)\n model += [last_layer]\n self.output_dim = output_dim\n self.model = nn.Sequential(*model)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)" }, { "identifier": "ActorProb", "path": "offlinerlkit/modules/actor_module.py", "snippet": "class ActorProb(nn.Module):\n def __init__(\n self,\n backbone: nn.Module,\n dist_net: nn.Module,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n self.dist_net = dist_net.to(device)\n\n def forward(self, obs: Union[np.ndarray, torch.Tensor]) -> torch.distributions.Normal:\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n logits = self.backbone(obs)\n dist = self.dist_net(logits)\n return dist" }, { "identifier": "Critic", "path": "offlinerlkit/modules/critic_module.py", "snippet": "class Critic(nn.Module):\n def __init__(self, backbone: nn.Module, device: str = \"cpu\") -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n latent_dim = getattr(backbone, \"output_dim\")\n self.last = nn.Linear(latent_dim, 1).to(device)\n\n def forward(\n self,\n obs: Union[np.ndarray, torch.Tensor],\n actions: Optional[Union[np.ndarray, torch.Tensor]] = None\n ) -> torch.Tensor:\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n if actions is not None:\n actions = torch.as_tensor(actions, device=self.device, dtype=torch.float32).flatten(1)\n obs = torch.cat([obs, actions], dim=1)\n logits = self.backbone(obs)\n values = self.last(logits)\n return values" }, { "identifier": "TanhDiagGaussian", "path": "offlinerlkit/modules/dist_module.py", "snippet": "class TanhDiagGaussian(DiagGaussian):\n def __init__(\n self,\n latent_dim,\n output_dim,\n unbounded=False,\n conditioned_sigma=False,\n max_mu=1.0,\n sigma_min=-5.0,\n sigma_max=2.0\n ):\n super().__init__(\n latent_dim=latent_dim,\n output_dim=output_dim,\n unbounded=unbounded,\n conditioned_sigma=conditioned_sigma,\n max_mu=max_mu,\n sigma_min=sigma_min,\n sigma_max=sigma_max\n )\n\n def forward(self, logits):\n mu = self.mu(logits)\n if not self._unbounded:\n mu = self._max * torch.tanh(mu)\n if self._c_sigma:\n sigma = torch.clamp(self.sigma(logits), min=self._sigma_min, max=self._sigma_max).exp()\n else:\n shape = [1] * len(mu.shape)\n shape[1] = -1\n sigma = (self.sigma_param.view(shape) + torch.zeros_like(mu)).exp()\n return TanhNormalWrapper(mu, sigma)" }, { "identifier": "EnsembleDynamicsModel", "path": "offlinerlkit/modules/dynamics_module.py", "snippet": "class EnsembleDynamicsModel(nn.Module):\n def __init__(\n self,\n obs_dim: int,\n action_dim: int,\n hidden_dims: Union[List[int], 
Tuple[int]],\n num_ensemble: int = 7,\n num_elites: int = 5,\n activation: nn.Module = Swish,\n weight_decays: Optional[Union[List[float], Tuple[float]]] = None,\n with_reward: bool = True,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.num_ensemble = num_ensemble\n self.num_elites = num_elites\n self._with_reward = with_reward\n self.device = torch.device(device)\n\n self.activation = activation()\n\n assert len(weight_decays) == (len(hidden_dims) + 1)\n\n module_list = []\n hidden_dims = [obs_dim+action_dim] + list(hidden_dims)\n if weight_decays is None:\n weight_decays = [0.0] * (len(hidden_dims) + 1)\n for in_dim, out_dim, weight_decay in zip(hidden_dims[:-1], hidden_dims[1:], weight_decays[:-1]):\n module_list.append(EnsembleLinear(in_dim, out_dim, num_ensemble, weight_decay))\n self.backbones = nn.ModuleList(module_list)\n\n self.output_layer = EnsembleLinear(\n hidden_dims[-1],\n 2 * (obs_dim + self._with_reward),\n num_ensemble,\n weight_decays[-1]\n )\n\n self.register_parameter(\n \"max_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * -10, requires_grad=True)\n )\n\n self.register_parameter(\n \"elites\",\n nn.Parameter(torch.tensor(list(range(0, self.num_elites))), requires_grad=False)\n )\n\n self.to(self.device)\n\n def forward(self, obs_action: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:\n obs_action = torch.as_tensor(obs_action, dtype=torch.float32).to(self.device)\n output = obs_action\n for layer in self.backbones:\n output = self.activation(layer(output))\n mean, logvar = torch.chunk(self.output_layer(output), 2, dim=-1)\n logvar = soft_clamp(logvar, self.min_logvar, self.max_logvar)\n return mean, logvar\n\n def load_save(self) -> None:\n for layer in self.backbones:\n layer.load_save()\n self.output_layer.load_save()\n\n def update_save(self, indexes: List[int]) -> None:\n for layer in self.backbones:\n layer.update_save(indexes)\n self.output_layer.update_save(indexes)\n \n def get_decay_loss(self) -> torch.Tensor:\n decay_loss = 0\n for layer in self.backbones:\n decay_loss += layer.get_decay_loss()\n decay_loss += self.output_layer.get_decay_loss()\n return decay_loss\n\n def set_elites(self, indexes: List[int]) -> None:\n assert len(indexes) <= self.num_ensemble and max(indexes) < self.num_ensemble\n self.register_parameter('elites', nn.Parameter(torch.tensor(indexes), requires_grad=False))\n \n def random_elite_idxs(self, batch_size: int) -> np.ndarray:\n idxs = np.random.choice(self.elites.data.cpu().numpy(), size=batch_size)\n return idxs" }, { "identifier": "EnsembleDynamics", "path": "offlinerlkit/dynamics/ensemble_dynamics.py", "snippet": "class EnsembleDynamics(BaseDynamics):\n def __init__(\n self,\n model: nn.Module,\n optim: torch.optim.Optimizer,\n scaler: StandardScaler,\n terminal_fn: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray],\n penalty_coef: float = 0.0,\n uncertainty_mode: str = \"aleatoric\"\n ) -> None:\n super().__init__(model, optim)\n self.scaler = scaler\n self.terminal_fn = terminal_fn\n self._penalty_coef = penalty_coef\n self._uncertainty_mode = uncertainty_mode\n\n @ torch.no_grad()\n def step(\n self,\n obs: np.ndarray,\n action: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:\n '''\n Return:\n reward (B,1) (if obs has batch)\n terminal (B,1)\n '''\n \"imagine single forward step\"\n obs_act = np.concatenate([obs, action], 
axis=-1)\n obs_act = self.scaler.transform(obs_act)\n mean, logvar = self.model(obs_act)\n mean = mean.cpu().numpy()\n logvar = logvar.cpu().numpy()\n mean[..., :-1] += obs # We estimated delta_obs\n std = np.sqrt(np.exp(logvar))\n\n ensemble_samples = (mean + np.random.normal(size=mean.shape) * std).astype(np.float32)\n\n # choose one model from ensemble\n num_models, batch_size, _ = ensemble_samples.shape\n model_idxs = self.model.random_elite_idxs(batch_size)\n samples = ensemble_samples[model_idxs, np.arange(batch_size)]\n \n next_obs = samples[..., :-1]\n reward = samples[..., -1:]\n terminal = self.terminal_fn(obs, action, next_obs)\n info = {}\n info[\"raw_reward\"] = reward\n\n if self._penalty_coef:\n if self._uncertainty_mode == \"aleatoric\":\n penalty = np.amax(np.linalg.norm(std, axis=2), axis=0)\n elif self._uncertainty_mode == \"pairwise-diff\":\n next_obses_mean = mean[..., :-1]\n next_obs_mean = np.mean(next_obses_mean, axis=0)\n diff = next_obses_mean - next_obs_mean\n penalty = np.amax(np.linalg.norm(diff, axis=2), axis=0)\n elif self._uncertainty_mode == \"ensemble_std\":\n next_obses_mean = mean[..., :-1]\n penalty = np.sqrt(next_obses_mean.var(0).mean(1))\n else:\n raise ValueError\n penalty = np.expand_dims(penalty, 1).astype(np.float32)\n assert penalty.shape == reward.shape\n reward = reward - self._penalty_coef * penalty\n info[\"penalty\"] = penalty\n \n return next_obs, reward, terminal, info\n \n @ torch.no_grad()\n def sample_next_obss(\n self,\n obs: torch.Tensor,\n action: torch.Tensor,\n num_samples: int\n ) -> torch.Tensor:\n obs_act = torch.cat([obs, action], dim=-1)\n obs_act = self.scaler.transform_tensor(obs_act)\n mean, logvar = self.model(obs_act)\n mean[..., :-1] += obs\n std = torch.sqrt(torch.exp(logvar))\n\n mean = mean[self.model.elites.data.cpu().numpy()]\n std = std[self.model.elites.data.cpu().numpy()]\n\n samples = torch.stack([mean + torch.randn_like(std) * std for i in range(num_samples)], 0)\n next_obss = samples[..., :-1]\n return next_obss\n\n def format_samples_for_training(self, data: Dict) -> Tuple[np.ndarray, np.ndarray]:\n obss = data[\"observations\"]\n actions = data[\"actions\"]\n next_obss = data[\"next_observations\"]\n rewards = data[\"rewards\"]\n rewards = rewards.reshape(rewards.shape[0], -1)\n delta_obss = next_obss - obss\n inputs = np.concatenate((obss, actions), axis=-1)\n targets = np.concatenate((delta_obss, rewards), axis=-1)\n return inputs, targets\n\n def train(\n self,\n data: Dict,\n logger: Logger,\n max_epochs: Optional[float] = None,\n max_epochs_since_update: int = 5,\n batch_size: int = 256,\n holdout_ratio: float = 0.2,\n logvar_loss_coef: float = 0.01\n ) -> None:\n inputs, targets = self.format_samples_for_training(data)\n data_size = inputs.shape[0]\n holdout_size = min(int(data_size * holdout_ratio), 1000)\n train_size = data_size - holdout_size\n train_splits, holdout_splits = torch.utils.data.random_split(range(data_size), (train_size, holdout_size))\n train_inputs, train_targets = inputs[train_splits.indices], targets[train_splits.indices]\n holdout_inputs, holdout_targets = inputs[holdout_splits.indices], targets[holdout_splits.indices]\n\n self.scaler.fit(train_inputs)\n train_inputs = self.scaler.transform(train_inputs)\n holdout_inputs = self.scaler.transform(holdout_inputs)\n holdout_losses = [1e10 for i in range(self.model.num_ensemble)]\n\n data_idxes = np.random.randint(train_size, size=[self.model.num_ensemble, train_size])\n def shuffle_rows(arr):\n idxes = 
np.argsort(np.random.uniform(size=arr.shape), axis=-1)\n return arr[np.arange(arr.shape[0])[:, None], idxes]\n\n epoch = 0\n cnt = 0\n logger.log(\"Training dynamics:\")\n while True:\n epoch += 1\n train_loss = self.learn(train_inputs[data_idxes], train_targets[data_idxes], batch_size, logvar_loss_coef)\n new_holdout_losses = self.validate(holdout_inputs, holdout_targets)\n holdout_loss = (np.sort(new_holdout_losses)[:self.model.num_elites]).mean()\n logger.logkv(\"loss/dynamics_train_loss\", train_loss)\n logger.logkv(\"loss/dynamics_holdout_loss\", holdout_loss)\n logger.set_timestep(epoch)\n logger.dumpkvs(exclude=[\"policy_training_progress\"])\n\n # shuffle data for each base learner\n data_idxes = shuffle_rows(data_idxes)\n\n indexes = []\n for i, new_loss, old_loss in zip(range(len(holdout_losses)), new_holdout_losses, holdout_losses):\n improvement = (old_loss - new_loss) / old_loss\n if improvement > 0.01:\n indexes.append(i)\n holdout_losses[i] = new_loss\n \n if len(indexes) > 0:\n self.model.update_save(indexes)\n cnt = 0\n else:\n cnt += 1\n \n if (cnt >= max_epochs_since_update) or (max_epochs and (epoch >= max_epochs)):\n break\n\n indexes = self.select_elites(holdout_losses)\n self.model.set_elites(indexes)\n self.model.load_save()\n self.save(logger.model_dir)\n self.model.eval()\n logger.log(\"elites:{} , holdout loss: {}\".format(indexes, (np.sort(holdout_losses)[:self.model.num_elites]).mean()))\n \n def learn(\n self,\n inputs: np.ndarray,\n targets: np.ndarray,\n batch_size: int = 256,\n logvar_loss_coef: float = 0.01\n ) -> float:\n self.model.train()\n train_size = inputs.shape[1]\n losses = []\n\n for batch_num in range(int(np.ceil(train_size / batch_size))):\n inputs_batch = inputs[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = targets[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = torch.as_tensor(targets_batch).to(self.model.device)\n \n mean, logvar = self.model(inputs_batch)\n inv_var = torch.exp(-logvar)\n # Average over batch and dim, sum over ensembles.\n mse_loss_inv = (torch.pow(mean - targets_batch, 2) * inv_var).mean(dim=(1, 2)) # MLE for Gaussian\n var_loss = logvar.mean(dim=(1, 2))\n loss = mse_loss_inv.sum() + var_loss.sum()\n loss = loss + self.model.get_decay_loss()\n loss = loss + logvar_loss_coef * self.model.max_logvar.sum() - logvar_loss_coef * self.model.min_logvar.sum()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n losses.append(loss.item())\n return np.mean(losses)\n \n @ torch.no_grad()\n def validate(self, inputs: np.ndarray, targets: np.ndarray) -> List[float]:\n self.model.eval()\n targets = torch.as_tensor(targets).to(self.model.device)\n mean, _ = self.model(inputs)\n loss = ((mean - targets) ** 2).mean(dim=(1, 2))\n val_loss = list(loss.cpu().numpy())\n return val_loss\n \n def select_elites(self, metrics: List) -> List[int]:\n pairs = [(metric, index) for metric, index in zip(metrics, range(len(metrics)))]\n pairs = sorted(pairs, key=lambda x: x[0])\n elites = [pairs[i][1] for i in range(self.model.num_elites)]\n return elites\n\n def save(self, save_path: str) -> None:\n torch.save(self.model.state_dict(), os.path.join(save_path, \"dynamics.pth\"))\n self.scaler.save_scaler(save_path)\n \n def load(self, load_path: str) -> None:\n self.model.load_state_dict(torch.load(os.path.join(load_path, \"dynamics.pth\"), map_location=self.model.device))\n self.scaler.load_scaler(load_path)" }, { "identifier": "StandardScaler", "path": "offlinerlkit/utils/scaler.py", 
"snippet": "class StandardScaler(object):\n def __init__(self, mu=None, std=None):\n self.mu = mu\n self.std = std\n\n def fit(self, data):\n \"\"\"Runs two ops, one for assigning the mean of the data to the internal mean, and\n another for assigning the standard deviation of the data to the internal standard deviation.\n This function must be called within a 'with <session>.as_default()' block.\n\n Arguments:\n data (np.ndarray): A numpy array containing the input\n\n Returns: None.\n \"\"\"\n self.mu = np.mean(data, axis=0, keepdims=True)\n self.std = np.std(data, axis=0, keepdims=True)\n self.std[self.std < 1e-12] = 1.0\n\n def transform(self, data):\n \"\"\"Transforms the input matrix data using the parameters of this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return (data - self.mu) / self.std\n\n def inverse_transform(self, data):\n \"\"\"Undoes the transformation performed by this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return self.std * data + self.mu\n \n def save_scaler(self, save_path):\n mu_path = path.join(save_path, \"mu.npy\")\n std_path = path.join(save_path, \"std.npy\")\n np.save(mu_path, self.mu)\n np.save(std_path, self.std)\n \n def load_scaler(self, load_path):\n mu_path = path.join(load_path, \"mu.npy\")\n std_path = path.join(load_path, \"std.npy\")\n self.mu = np.load(mu_path)\n self.std = np.load(std_path)\n\n def transform_tensor(self, data: torch.Tensor):\n device = data.device\n data = self.transform(data.cpu().numpy())\n data = torch.tensor(data, device=device)\n return data" }, { "identifier": "termination_fn_default", "path": "offlinerlkit/utils/termination_fns.py", "snippet": "def termination_fn_default(obs, act, next_obs):\n '''\n Return np.ndarray (obs.shape[0], 1)\n '''\n done = np.array([False] * obs.shape[0])\n done = done[:, None]\n return done" }, { "identifier": "ReplayBuffer", "path": "offlinerlkit/buffer/buffer.py", "snippet": "class ReplayBuffer:\n def __init__(\n self,\n buffer_size: int,\n obs_shape: Tuple,\n obs_dtype: np.dtype,\n action_dim: int,\n action_dtype: np.dtype,\n device: str = \"cpu\"\n ) -> None:\n self._max_size = buffer_size\n self.obs_shape = obs_shape\n self.obs_dtype = obs_dtype\n self.action_dim = action_dim\n self.action_dtype = action_dtype\n\n self._ptr = 0\n self._size = 0\n\n self.observations = np.zeros((self._max_size,) + self.obs_shape, dtype=obs_dtype)\n self.next_observations = np.zeros((self._max_size,) + self.obs_shape, dtype=obs_dtype)\n self.actions = np.zeros((self._max_size, self.action_dim), dtype=action_dtype)\n self.rewards = np.zeros((self._max_size, 1), dtype=np.float32)\n self.terminals = np.zeros((self._max_size, 1), dtype=np.float32)\n\n self.device = torch.device(device)\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n terminal: np.ndarray\n ) -> None:\n # Copy to avoid modification by reference\n self.observations[self._ptr] = np.array(obs).copy()\n self.next_observations[self._ptr] = np.array(next_obs).copy()\n self.actions[self._ptr] = np.array(action).copy()\n self.rewards[self._ptr] = np.array(reward).copy()\n self.terminals[self._ptr] = np.array(terminal).copy()\n\n self._ptr = (self._ptr + 1) % self._max_size\n self._size = min(self._size + 1, self._max_size)\n \n def add_batch(\n self,\n obss: 
np.ndarray,\n next_obss: np.ndarray,\n actions: np.ndarray,\n rewards: np.ndarray,\n terminals: np.ndarray\n ) -> None:\n batch_size = len(obss)\n indexes = np.arange(self._ptr, self._ptr + batch_size) % self._max_size\n\n self.observations[indexes] = np.array(obss).copy()\n self.next_observations[indexes] = np.array(next_obss).copy()\n self.actions[indexes] = np.array(actions).copy()\n self.rewards[indexes] = np.array(rewards).copy()\n self.terminals[indexes] = np.array(terminals).copy()\n\n self._ptr = (self._ptr + batch_size) % self._max_size\n self._size = min(self._size + batch_size, self._max_size)\n \n def load_dataset(self, dataset: Dict[str, np.ndarray]) -> None:\n observations = np.array(dataset[\"observations\"], dtype=self.obs_dtype)\n next_observations = np.array(dataset[\"next_observations\"], dtype=self.obs_dtype)\n actions = np.array(dataset[\"actions\"], dtype=self.action_dtype)\n rewards = np.array(dataset[\"rewards\"], dtype=np.float32).reshape(-1, 1)\n terminals = np.array(dataset[\"terminals\"], dtype=np.float32).reshape(-1, 1)\n\n self.observations = observations\n self.next_observations = next_observations\n self.actions = actions\n self.rewards = rewards\n self.terminals = terminals\n\n self._ptr = len(observations)\n self._size = len(observations)\n \n def normalize_obs(self, eps: float = 1e-3) -> Tuple[np.ndarray, np.ndarray]:\n mean = self.observations.mean(0, keepdims=True)\n std = self.observations.std(0, keepdims=True) + eps\n self.observations = (self.observations - mean) / std\n self.next_observations = (self.next_observations - mean) / std\n obs_mean, obs_std = mean, std\n return obs_mean, obs_std\n\n def sample(self, batch_size: int) -> Dict[str, torch.Tensor]:\n\n batch_indexes = np.random.randint(0, self._size, size=batch_size)\n \n return {\n \"observations\": torch.tensor(self.observations[batch_indexes]).to(self.device),\n \"actions\": torch.tensor(self.actions[batch_indexes]).to(self.device),\n \"next_observations\": torch.tensor(self.next_observations[batch_indexes]).to(self.device),\n \"terminals\": torch.tensor(self.terminals[batch_indexes]).to(self.device),\n \"rewards\": torch.tensor(self.rewards[batch_indexes]).to(self.device)\n }\n \n def sample_all(self) -> Dict[str, np.ndarray]:\n return {\n \"observations\": self.observations[:self._size].copy(),\n \"actions\": self.actions[:self._size].copy(),\n \"next_observations\": self.next_observations[:self._size].copy(),\n \"terminals\": self.terminals[:self._size].copy(),\n \"rewards\": self.rewards[:self._size].copy()\n }" }, { "identifier": "Logger", "path": "offlinerlkit/utils/logger.py", "snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except 
KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()" }, { "identifier": "make_log_dirs", "path": "offlinerlkit/utils/logger.py", "snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs" }, { "identifier": "MBPolicyTrainer", "path": "offlinerlkit/policy_trainer/mb_policy_trainer.py", "snippet": "class MBPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n eval_env: Union[gym.Env, gymnasium.Env],\n real_buffer: ReplayBuffer,\n fake_buffer: ReplayBuffer,\n logger: Logger,\n rollout_setting: Tuple[int, int, int],\n epoch: int = 1000,\n step_per_epoch: int = 1000,\n batch_size: int = 256,\n real_ratio: float = 0.05,\n eval_episodes: int = 10,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n dynamics_update_freq: int = 0,\n horizon: Optional[int] = None,\n has_terminal = False,\n binary_ret = False\n ) -> None:\n self.policy = 
policy\n self.eval_env = eval_env\n self.horizon = horizon\n self.real_buffer = real_buffer\n self.fake_buffer = fake_buffer\n self.logger = logger\n\n self._rollout_freq, self._rollout_batch_size, \\\n self._rollout_length = rollout_setting\n self._dynamics_update_freq = dynamics_update_freq\n\n self._epoch = epoch\n self._step_per_epoch = step_per_epoch\n self._batch_size = batch_size\n self._real_ratio = real_ratio\n self._eval_episodes = eval_episodes\n self.lr_scheduler = lr_scheduler\n\n self.is_gymnasium_env = hasattr(self.eval_env, \"get_true_observation\")\n assert (not self.is_gymnasium_env) or (self.horizon is not None), \"Horizon must be specified for Gymnasium env\"\n self.has_terminal = has_terminal\n self.binary_ret = binary_ret\n\n def train(self, last_eval = False) -> Dict[str, float]:\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n # train loop\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(range(self._step_per_epoch), desc=f\"Epoch #{e}/{self._epoch}\")\n for it in pbar:\n if num_timesteps % self._rollout_freq == 0: # rollout periodically\n init_obss = self.real_buffer.sample(self._rollout_batch_size)[\"observations\"].cpu().numpy()\n rollout_transitions, rollout_info = self.policy.rollout(init_obss, self._rollout_length)\n self.fake_buffer.add_batch(**rollout_transitions)\n self.logger.log(\n \"num rollout transitions: {}, reward mean: {:.4f}\".\\\n format(rollout_info[\"num_transitions\"], rollout_info[\"reward_mean\"])\n )\n for _key, _value in rollout_info.items():\n self.logger.logkv_mean(\"rollout_info/\"+_key, _value)\n\n # Sample from both real (offline data) and fake (rollout data) according to real_ratio\n real_sample_size = int(self._batch_size * self._real_ratio)\n fake_sample_size = self._batch_size - real_sample_size\n real_batch = self.real_buffer.sample(batch_size=real_sample_size)\n fake_batch = self.fake_buffer.sample(batch_size=fake_sample_size)\n batch = {\"real\": real_batch, \"fake\": fake_batch}\n loss = self.policy.learn(batch)\n pbar.set_postfix(**loss)\n\n for k, v in loss.items():\n self.logger.logkv_mean(k, v)\n \n # update the dynamics if necessary\n if 0 < self._dynamics_update_freq and (num_timesteps+1)%self._dynamics_update_freq == 0:\n dynamics_update_info = self.policy.update_dynamics(self.real_buffer)\n for k, v in dynamics_update_info.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n \n if last_eval and e < self._epoch: # When last_eval is True, only evaluate on last epoch\n pass\n else:\n # evaluate current policy\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n ep_length_mean, ep_length_std = np.mean(eval_info[\"eval/episode_length\"]), np.std(eval_info[\"eval/episode_length\"])\n\n if not hasattr(self.eval_env, \"get_normalized_score\"): # gymnasium_env does not have normalized score\n last_10_performance.append(ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward_std\", ep_reward_std) \n else: \n norm_ep_rew_mean = self.eval_env.get_normalized_score(ep_reward_mean) * 100\n norm_ep_rew_std = self.eval_env.get_normalized_score(ep_reward_std) * 100\n last_10_performance.append(norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward\", norm_ep_rew_mean)\n 
self.logger.logkv(\"eval/normalized_episode_reward_std\", norm_ep_rew_std)\n self.logger.logkv(\"eval/episode_length\", ep_length_mean)\n self.logger.logkv(\"eval/episode_length_std\", ep_length_std)\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n \n # save checkpoint\n torch.save(self.policy.state_dict(), os.path.join(self.logger.checkpoint_dir, \"policy.pth\"))\n\n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy.pth\"))\n self.policy.dynamics.save(self.logger.model_dir)\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}\n\n def _evaluate(self) -> Dict[str, List[float]]:\n is_gymnasium_env = self.is_gymnasium_env\n \n self.policy.eval()\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n\n eval_ep_info_buffer = []\n num_episodes = 0\n episode_reward, episode_length = 0, 0\n\n if not self.has_terminal: # Finite horizon, terminal is unimportant\n while num_episodes < self._eval_episodes:\n for timestep in range(self.horizon): # One epoch\n # print(f\"Timestep {timestep}, obs {obs}\")\n action = self.policy.select_action(obs.reshape(1, -1), deterministic=True)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if self.binary_ret:\n episode_reward = 1 if episode_reward >= 1 else 0\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n else:\n while num_episodes < self._eval_episodes:\n action = self.policy.select_action(obs.reshape(1, -1), deterministic=True)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if terminal: # Episode finishes\n if self.binary_ret:\n episode_reward = 1 if episode_reward >= 1 else 0\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n return {\n \"eval/episode_reward\": [ep_info[\"episode_reward\"] for ep_info in eval_ep_info_buffer],\n \"eval/episode_length\": [ep_info[\"episode_length\"] for ep_info in eval_ep_info_buffer]\n }" }, { "identifier": "COMBOPolicy", "path": "offlinerlkit/policy/model_based/combo.py", "snippet": "class COMBOPolicy(CQLPolicy):\n \"\"\"\n Conservative Offline Model-Based Policy Optimization <Ref: https://arxiv.org/abs/2102.08363>\n \"\"\"\n\n def __init__(\n self,\n dynamics: 
BaseDynamics,\n actor: nn.Module,\n critic1: nn.Module,\n critic2: nn.Module,\n actor_optim: torch.optim.Optimizer,\n critic1_optim: torch.optim.Optimizer,\n critic2_optim: torch.optim.Optimizer,\n action_space: gym.spaces.Space,\n tau: float = 0.005,\n gamma: float = 0.99,\n alpha: Union[float, Tuple[float, torch.Tensor, torch.optim.Optimizer]] = 0.2,\n cql_weight: float = 1.0,\n temperature: float = 1.0,\n max_q_backup: bool = False,\n deterministic_backup: bool = True,\n with_lagrange: bool = True,\n lagrange_threshold: float = 10.0,\n cql_alpha_lr: float = 1e-4,\n num_repeart_actions:int = 10,\n uniform_rollout: bool = False,\n rho_s: str = \"mix\"\n ) -> None:\n super().__init__(\n actor,\n critic1,\n critic2,\n actor_optim,\n critic1_optim,\n critic2_optim,\n action_space,\n tau=tau,\n gamma=gamma,\n alpha=alpha,\n cql_weight=cql_weight,\n temperature=temperature,\n max_q_backup=max_q_backup,\n deterministic_backup=deterministic_backup,\n with_lagrange=with_lagrange,\n lagrange_threshold=lagrange_threshold,\n cql_alpha_lr=cql_alpha_lr,\n num_repeart_actions=num_repeart_actions\n )\n\n self.dynamics = dynamics\n self._uniform_rollout = uniform_rollout\n self._rho_s = rho_s\n\n def rollout(\n self,\n init_obss: np.ndarray,\n rollout_length: int\n ) -> Tuple[Dict[str, np.ndarray], Dict]:\n\n num_transitions = 0\n rewards_arr = np.array([])\n rollout_transitions = defaultdict(list)\n\n # rollout\n observations = init_obss\n for _ in range(rollout_length):\n if self._uniform_rollout:\n actions = np.random.uniform(\n self.action_space.low[0],\n self.action_space.high[0],\n size=(len(observations), self.action_space.shape[0])\n )\n else:\n actions = self.select_action(observations)\n next_observations, rewards, terminals, info = self.dynamics.step(observations, actions)\n rollout_transitions[\"obss\"].append(observations)\n rollout_transitions[\"next_obss\"].append(next_observations)\n rollout_transitions[\"actions\"].append(actions)\n rollout_transitions[\"rewards\"].append(rewards)\n rollout_transitions[\"terminals\"].append(terminals)\n\n num_transitions += len(observations)\n rewards_arr = np.append(rewards_arr, rewards.flatten())\n\n nonterm_mask = (~terminals).flatten()\n if nonterm_mask.sum() == 0:\n break\n\n observations = next_observations[nonterm_mask]\n \n for k, v in rollout_transitions.items():\n rollout_transitions[k] = np.concatenate(v, axis=0)\n\n return rollout_transitions, \\\n {\"num_transitions\": num_transitions, \"reward_mean\": rewards_arr.mean()}\n \n def learn(self, batch: Dict) -> Dict[str, float]:\n real_batch, fake_batch = batch[\"real\"], batch[\"fake\"]\n # Mix data from real (offline) and fake (rollout)\n mix_batch = {k: torch.cat([real_batch[k], fake_batch[k]], 0) for k in real_batch.keys()}\n\n obss, actions, next_obss, rewards, terminals = mix_batch[\"observations\"], mix_batch[\"actions\"], \\\n mix_batch[\"next_observations\"], mix_batch[\"rewards\"], mix_batch[\"terminals\"]\n batch_size = obss.shape[0]\n \n # update actor\n a, log_probs = self.actforward(obss)\n q1a, q2a = self.critic1(obss, a), self.critic2(obss, a)\n actor_loss = (self._alpha * log_probs - torch.min(q1a, q2a)).mean()\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n\n if self._is_auto_alpha:\n log_probs = log_probs.detach() + self._target_entropy\n alpha_loss = -(self._log_alpha * log_probs).mean()\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n self._alpha = self._log_alpha.detach().exp()\n \n # compute td error\n 
if self._max_q_backup:\n with torch.no_grad():\n tmp_next_obss = next_obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, next_obss.shape[-1])\n tmp_next_actions, _ = self.actforward(tmp_next_obss)\n tmp_next_q1 = self.critic1_old(tmp_next_obss, tmp_next_actions) \\\n .view(batch_size, self._num_repeat_actions, 1) \\\n .max(1)[0].view(-1, 1)\n tmp_next_q2 = self.critic2_old(tmp_next_obss, tmp_next_actions) \\\n .view(batch_size, self._num_repeat_actions, 1) \\\n .max(1)[0].view(-1, 1)\n next_q = torch.min(tmp_next_q1, tmp_next_q2)\n else:\n with torch.no_grad():\n next_actions, next_log_probs = self.actforward(next_obss)\n next_q = torch.min(\n self.critic1_old(next_obss, next_actions),\n self.critic2_old(next_obss, next_actions)\n )\n if not self._deterministic_backup:\n next_q -= self._alpha * next_log_probs\n\n target_q = rewards + self._gamma * (1 - terminals) * next_q\n q1, q2 = self.critic1(obss, actions), self.critic2(obss, actions)\n critic1_loss = ((q1 - target_q).pow(2)).mean()\n critic2_loss = ((q2 - target_q).pow(2)).mean()\n\n # compute conservative loss\n if self._rho_s == \"model\":\n obss, actions, next_obss = fake_batch[\"observations\"], \\\n fake_batch[\"actions\"], fake_batch[\"next_observations\"]\n \n batch_size = len(obss)\n random_actions = torch.FloatTensor(\n batch_size * self._num_repeat_actions, actions.shape[-1]\n ).uniform_(self.action_space.low[0], self.action_space.high[0]).to(self.actor.device)\n # tmp_obss & tmp_next_obss: (batch_size * num_repeat, obs_dim)\n tmp_obss = obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, obss.shape[-1])\n tmp_next_obss = next_obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, obss.shape[-1])\n \n obs_pi_value1, obs_pi_value2 = self.calc_pi_values(tmp_obss, tmp_obss)\n next_obs_pi_value1, next_obs_pi_value2 = self.calc_pi_values(tmp_next_obss, tmp_obss)\n random_value1, random_value2 = self.calc_random_values(tmp_obss, random_actions)\n\n for value in [\n obs_pi_value1, obs_pi_value2, next_obs_pi_value1, next_obs_pi_value2,\n random_value1, random_value2\n ]:\n value.reshape(batch_size, self._num_repeat_actions, 1)\n \n # cat_q shape: (batch_size, 3 * num_repeat, 1)\n cat_q1 = torch.cat([obs_pi_value1, next_obs_pi_value1, random_value1], 1)\n cat_q2 = torch.cat([obs_pi_value2, next_obs_pi_value2, random_value2], 1)\n # Samples from the original dataset\n real_obss, real_actions = real_batch['observations'], real_batch['actions']\n q1, q2 = self.critic1(real_obss, real_actions), self.critic2(real_obss, real_actions)\n\n conservative_loss1 = \\\n torch.logsumexp(cat_q1 / self._temperature, dim=1).mean() * self._cql_weight * self._temperature - \\\n q1.mean() * self._cql_weight\n conservative_loss2 = \\\n torch.logsumexp(cat_q2 / self._temperature, dim=1).mean() * self._cql_weight * self._temperature - \\\n q2.mean() * self._cql_weight\n \n if self._with_lagrange:\n cql_alpha = torch.clamp(self.cql_log_alpha.exp(), 0.0, 1e6)\n conservative_loss1 = cql_alpha * (conservative_loss1 - self._lagrange_threshold)\n conservative_loss2 = cql_alpha * (conservative_loss2 - self._lagrange_threshold)\n\n self.cql_alpha_optim.zero_grad()\n cql_alpha_loss = -(conservative_loss1 + conservative_loss2) * 0.5\n cql_alpha_loss.backward(retain_graph=True)\n self.cql_alpha_optim.step()\n \n critic1_loss = critic1_loss + conservative_loss1\n critic2_loss = critic2_loss 
+ conservative_loss2\n\n # update critic\n self.critic1_optim.zero_grad()\n critic1_loss.backward(retain_graph=True)\n self.critic1_optim.step()\n\n self.critic2_optim.zero_grad()\n critic2_loss.backward()\n self.critic2_optim.step()\n\n self._sync_weight()\n\n result = {\n \"loss/actor\": actor_loss.item(),\n \"loss/critic1\": critic1_loss.item(),\n \"loss/critic2\": critic2_loss.item()\n }\n\n if self._is_auto_alpha:\n result[\"loss/alpha\"] = alpha_loss.item()\n result[\"alpha\"] = self._alpha.item()\n if self._with_lagrange:\n result[\"loss/cql_alpha\"] = cql_alpha_loss.item()\n result[\"cql_alpha\"] = cql_alpha.item()\n \n return result" }, { "identifier": "none_or_str", "path": "offlinerlkit/utils/none_or_str.py", "snippet": "def none_or_str(value):\n if value == 'None':\n return None\n return value" }, { "identifier": "create_env_dataset", "path": "envs/pointmaze/create_maze_dataset.py", "snippet": "def create_env_dataset(args):\n '''\n Create env and dataset (if not created)\n '''\n maze_config = json.load(open(args.maze_config_file, 'r'))\n maze = maze_config[\"maze\"]\n map = maze['map'] \n\n start = maze['start']\n goal = maze['goal']\n\n sample_args = maze_config[\"sample_args\"]\n\n print(f\"Create point maze\")\n point_maze = PointMaze(data_path = os.path.join(args.data_dir, args.data_file), \n horizon = args.horizon,\n maze_map = map,\n start = np.array(start),\n goal = np.array(goal),\n sample_args = sample_args,\n debug=False,\n render=False) \n env = point_maze.env_cls()\n trajs = point_maze.dataset[0]\n return env, trajs" }, { "identifier": "get_pointmaze_dataset", "path": "envs/pointmaze/utils/trajectory.py", "snippet": "def get_pointmaze_dataset(\n trajs: List,\n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray, float]:\n '''\n Return:\n dataset: Dict. 
key 'rtgs' is set to zero, it will not be used in training\n init_obss\n max offline return\n '''\n num_trajs = int(len(trajs) * sample_ratio)\n idxs = np.random.choice(len(trajs), size=(num_trajs), replace = False)\n valid_trajs = [trajs[i] for i in list(idxs)]\n\n obss = [traj.observations[0:-1] for traj in valid_trajs]\n next_obss = [traj.observations[1:] for traj in valid_trajs]\n acts = [traj.actions[0:-1] for traj in valid_trajs]\n rs = [traj.rewards[0:-1] for traj in valid_trajs]\n init_obss = [traj.observations[0:1] for traj in valid_trajs] # initial observations\n\n obss = np.concatenate(obss, axis=0)\n next_obss = np.concatenate(next_obss, axis=0)\n acts = np.concatenate(acts, axis=0)\n rs = np.concatenate(rs, axis=0)\n terminals = np.array([False]).repeat(obss.shape[0])\n weights = np.ones_like(rs).astype(np.float32)\n init_obss = np.concatenate(init_obss, axis=0)\n\n rets = [sum(traj.rewards) for traj in valid_trajs]\n rtgs = np.zeros_like(rs) \n\n dataset = {\n \"observations\": obss,\n \"next_observations\": next_obss,\n \"actions\": acts,\n \"rewards\": rs,\n \"rtgs\": rtgs,\n \"terminals\": terminals,\n \"weights\": weights}\n\n return dataset, init_obss, max(rets)" }, { "identifier": "PointMazeObsWrapper", "path": "envs/pointmaze/utils/maze_utils.py", "snippet": "class PointMazeObsWrapper(Wrapper):\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = env.observation_space['observation']\n\n def observation(self, obs: Dict[str, np.ndarray]) -> np.ndarray:\n return obs['observation']\n \n def step(self, action):\n '''\n use truncated signal as terminal\n '''\n next_obs, reward, _, truncated, info = self.env.step(action)\n next_obs = self.observation(next_obs)\n return next_obs, reward, truncated, info\n\n def reset(self, seed=None):\n obs, _ = self.env.reset(seed=seed)\n return self.observation(obs)" } ]
import argparse
import random
import datetime
import numpy as np
import torch

from offlinerlkit.nets import MLP
from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian, EnsembleDynamicsModel
from offlinerlkit.dynamics import EnsembleDynamics
from offlinerlkit.utils.scaler import StandardScaler
from offlinerlkit.utils.termination_fns import termination_fn_default
from offlinerlkit.buffer import ReplayBuffer
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import MBPolicyTrainer
from offlinerlkit.policy import COMBOPolicy
from offlinerlkit.utils.none_or_str import none_or_str
from envs.pointmaze.create_maze_dataset import create_env_dataset
from envs.pointmaze.utils.trajectory import get_pointmaze_dataset
from envs.pointmaze.utils.maze_utils import PointMazeObsWrapper
14130
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--algo_name", type=str, default="combo")
    parser.add_argument("--task", type=str, default="pointmaze") # Self-constructed environment
    parser.add_argument("--last_eval", action="store_true")

    # env config (general)
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--horizon', type=int, default=200, help="max path length for pickplace")

    # env config (pointmaze)
    parser.add_argument('--maze_config_file', type=str, default='envs/pointmaze/config/maze_default.json')
    parser.add_argument('--data_file', type=str, default='pointmaze.dat')

    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--actor-lr", type=float, default=1e-4)
    parser.add_argument("--critic-lr", type=float, default=3e-4)
    parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256])
    parser.add_argument("--gamma", type=float, default=0.99)
    parser.add_argument("--tau", type=float, default=0.005)
    parser.add_argument("--alpha", type=float, default=0.2)
    parser.add_argument("--auto-alpha", default=True)
    parser.add_argument("--target-entropy", type=int, default=None)
    parser.add_argument("--alpha-lr", type=float, default=1e-4)
    parser.add_argument("--cql-weight", type=float, default=1.0)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--max-q-backup", type=bool, default=False)
    parser.add_argument("--deterministic-backup", type=bool, default=True)
    parser.add_argument("--with-lagrange", type=bool, default=False)
    parser.add_argument("--lagrange-threshold", type=float, default=10.0)
    parser.add_argument("--cql-alpha-lr", type=float, default=3e-4)
    parser.add_argument("--num-repeat-actions", type=int, default=10)
    parser.add_argument("--uniform-rollout", type=bool, default=False)
    parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"])
    parser.add_argument("--dynamics-lr", type=float, default=1e-3)
    parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4])
    parser.add_argument("--n-ensemble", type=int, default=7)
    parser.add_argument("--n-elites", type=int, default=5)
    parser.add_argument("--rollout-freq", type=int, default=1000)
    parser.add_argument("--rollout-batch-size", type=int, default=50000)
    parser.add_argument("--rollout-length", type=int, default=5)
    parser.add_argument("--model-retain-epochs", type=int, default=5)
    parser.add_argument("--real-ratio", type=float, default=0.5)
    parser.add_argument("--load-dynamics-path", type=none_or_str, default=None)
    parser.add_argument("--epoch", type=int, default=100)
    parser.add_argument("--step-per-epoch", type=int, default=1000)
    parser.add_argument("--eval_episodes", type=int, default=10)
    parser.add_argument("--batch-size", type=int, default=256)
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")

    return parser.parse_args()


def train(args=get_args()):
    # seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # create env and dataset
    if args.task == 'pointmaze':
        env, trajs = create_env_dataset(args)
        env = PointMazeObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        args.obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        args.action_dim = np.prod(args.action_shape)
        dataset, _, _ = get_pointmaze_dataset(trajs)
    else:
        raise NotImplementedError
    env.reset(seed=args.seed)

    # create policy model
    actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims)
    critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims)
    critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--algo_name", type=str, default="combo")
    parser.add_argument("--task", type=str, default="pointmaze") # Self-constructed environment
    parser.add_argument("--last_eval", action="store_true")

    # env config (general)
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--horizon', type=int, default=200, help="max path length for pickplace")

    # env config (pointmaze)
    parser.add_argument('--maze_config_file', type=str, default='envs/pointmaze/config/maze_default.json')
    parser.add_argument('--data_file', type=str, default='pointmaze.dat')

    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--actor-lr", type=float, default=1e-4)
    parser.add_argument("--critic-lr", type=float, default=3e-4)
    parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256])
    parser.add_argument("--gamma", type=float, default=0.99)
    parser.add_argument("--tau", type=float, default=0.005)
    parser.add_argument("--alpha", type=float, default=0.2)
    parser.add_argument("--auto-alpha", default=True)
    parser.add_argument("--target-entropy", type=int, default=None)
    parser.add_argument("--alpha-lr", type=float, default=1e-4)
    parser.add_argument("--cql-weight", type=float, default=1.0)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--max-q-backup", type=bool, default=False)
    parser.add_argument("--deterministic-backup", type=bool, default=True)
    parser.add_argument("--with-lagrange", type=bool, default=False)
    parser.add_argument("--lagrange-threshold", type=float, default=10.0)
    parser.add_argument("--cql-alpha-lr", type=float, default=3e-4)
    parser.add_argument("--num-repeat-actions", type=int, default=10)
    parser.add_argument("--uniform-rollout", type=bool, default=False)
    parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"])
    parser.add_argument("--dynamics-lr", type=float, default=1e-3)
    parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4])
    parser.add_argument("--n-ensemble", type=int, default=7)
    parser.add_argument("--n-elites", type=int, default=5)
    parser.add_argument("--rollout-freq", type=int, default=1000)
    parser.add_argument("--rollout-batch-size", type=int, default=50000)
    parser.add_argument("--rollout-length", type=int, default=5)
    parser.add_argument("--model-retain-epochs", type=int, default=5)
    parser.add_argument("--real-ratio", type=float, default=0.5)
    parser.add_argument("--load-dynamics-path", type=none_or_str, default=None)
    parser.add_argument("--epoch", type=int, default=100)
    parser.add_argument("--step-per-epoch", type=int, default=1000)
    parser.add_argument("--eval_episodes", type=int, default=10)
    parser.add_argument("--batch-size", type=int, default=256)
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")

    return parser.parse_args()


def train(args=get_args()):
    # seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # create env and dataset
    if args.task == 'pointmaze':
        env, trajs = create_env_dataset(args)
        env = PointMazeObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        args.obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        args.action_dim = np.prod(args.action_shape)
        dataset, _, _ = get_pointmaze_dataset(trajs)
    else:
        raise NotImplementedError
    env.reset(seed=args.seed)

    # create policy model
    actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims)
    critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims)
    critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims)
dist = TanhDiagGaussian(
3
2023-10-11 08:36:06+00:00
16k
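The training script in the record above stops right before a policy distribution head is attached to the actor backbone (the next_line field is `dist = TanhDiagGaussian(`). As a hedged illustration of what such a head generally does, here is a minimal sketch built only from `torch.distributions`; the class name `TanhGaussianHead`, the 256-feature/2-action sizes, and the log-std clamp range are made-up stand-ins, not the actual `TanhDiagGaussian` API used by the script.

import torch
import torch.nn as nn
import torch.distributions as td


class TanhGaussianHead(nn.Module):
    """Map backbone features to a tanh-squashed diagonal Gaussian over actions."""

    def __init__(self, feature_dim: int, action_dim: int, log_std_bounds=(-20.0, 2.0)):
        super().__init__()
        self.mu = nn.Linear(feature_dim, action_dim)
        self.log_std = nn.Linear(feature_dim, action_dim)
        self.log_std_bounds = log_std_bounds

    def forward(self, features: torch.Tensor) -> td.Distribution:
        mu = self.mu(features)
        log_std = self.log_std(features).clamp(*self.log_std_bounds)
        base = td.Independent(td.Normal(mu, log_std.exp()), 1)
        # cache_size=1 lets log_prob reuse the pre-tanh value instead of calling atanh
        return td.TransformedDistribution(base, [td.TanhTransform(cache_size=1)])


# usage with made-up sizes: 256 backbone features, 2-dimensional actions
head = TanhGaussianHead(feature_dim=256, action_dim=2)
dist = head(torch.randn(4, 256))
actions = dist.rsample()           # reparameterized actions in (-1, 1), shape (4, 2)
log_prob = dist.log_prob(actions)  # log-density per sample, shape (4,)

Sampling with `rsample()` keeps the actions differentiable with respect to the network parameters, which is what SAC-style actor losses in offline algorithms such as the one above typically rely on.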
wilhelmagren/finq
finq/portfolio.py
[ { "identifier": "Asset", "path": "finq/asset.py", "snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str = \"Close\",\n pre_compute: bool = True,\n ):\n \"\"\" \"\"\"\n\n self._data = data\n self._name = name\n self._market = market\n self._index_name = index_name\n self._price_type = price_type\n self._pre_compute = pre_compute\n self._metrics = {}\n\n if pre_compute:\n log.info(\"pre-computing some common metrics...\")\n self.compute_common_metrics()\n log.info(\"OK!\")\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"\n Compare self with the other object. If ``other`` is of instance class\n ``Asset`` then compare their hashes. Otherwise ``False``.\n\n Parameters\n ----------\n other : Any\n The other object to compare equality against.\n\n Returns\n -------\n bool\n Whether or not they objects are equal.\n\n \"\"\"\n if isinstance(other, self.__class__):\n return hash(self) == hash(other)\n return False\n\n def __hash__(self) -> int:\n \"\"\"\n Compute a hash from the following attributes of the ``Asset`` object:\n (`_name`, `_market_`, `_index_name`, `_price_type`).\n\n NOTE: the ``Asset`` object is mutable, thus, the hash functionality\n can have unknown side effects... Use responsibly.\n\n Returns\n -------\n int\n The computed hash value.\n\n \"\"\"\n return hash(\n (\n len(self._data),\n self._data.mean(),\n self._data.std(),\n self._name,\n self._market,\n self._index_name,\n self._price_type,\n )\n )\n\n def __str__(self) -> str:\n \"\"\" \"\"\"\n\n format = f\"<{self.__class__.__name__} called `{self._name}`\"\n if self._market:\n format += f\" on {self._market}\"\n if self._index_name:\n format += f\" in {self._index_name}\"\n\n format += f\" (price type: {self._price_type})\"\n format += f\"\\n-- num samples:\\t\\t\\t{self._data.shape[0]}\"\n\n drm = self._metrics.get(\"daily_returns_mean\", None)\n if drm:\n format += f\"\\n-- daily returns mean:\\t\\t{drm:.5f}\"\n\n yrm = self._metrics.get(\"yearly_returns_mean\", None)\n if yrm:\n format += f\"\\n-- yearly returns mean:\\t\\t{yrm:.5f}\"\n\n yv = self._metrics.get(\"yearly_volatility\", None)\n if yv:\n format += f\"\\n-- yearly volatility:\\t\\t{yv:.5f}\"\n\n skew = self._metrics.get(\"skewness\", None)\n if skew:\n format += f\"\\n-- unbiased skewness:\\t\\t{self._metrics['skewness']:.5f}\"\n\n format += f\"\\nobject located at {hex(id(self))}>\"\n\n return format\n\n def compute_common_metrics(self):\n \"\"\" \"\"\"\n self._metrics[\"daily_returns\"] = self.period_returns(period=1)\n self._metrics[\"daily_returns_mean\"] = self.period_returns_mean(period=1)\n self._metrics[\"yearly_returns_mean\"] = self.period_returns_mean(period=252)\n self._metrics[\"yearly_volatility\"] = self.volatility(period=1, trading_days=252)\n self._metrics[\"skewness\"] = self.skewness()\n\n def period_returns(self, period: int = 1) -> pd.Series:\n \"\"\" \"\"\"\n return self._data.pct_change(periods=period)\n\n def period_returns_mean(self, period: int = 1) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).mean(axis=0)\n\n def volatility(\n self, period: int = 1, trading_days: int = 252\n ) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).std() * np.sqrt(trading_days)\n\n def skewness(self) -> np.float32:\n \"\"\"\n Computes the skewness of the saved data. 
Uses the ``Adjusted Fisher-Pearson\n standardized moment coefficient`` formula without bias [1, 2]. Skewness is a\n measure of the asymmetry of the probability distribution for a real-valued\n random variable around its mean.\n\n Returns\n -------\n np.float32\n The skewness measure for the saved historical price data.\n\n References\n ----------\n [1] Skewness calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html\n [2] Moment calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.moment.html\n\n \"\"\"\n return self._data.skew().astype(np.float32)\n\n @property\n def data(self) -> pd.Series:\n \"\"\"\n Return the saved data by accessing it as a property of the ``Asset`` object.\n\n Returns\n -------\n pd.Series\n A ``pd.Series`` copy of the saved data.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data: pd.Series):\n \"\"\"\n Set the value of the data attribute for the ``Asset`` object.\n\n Parameters\n ----------\n data : pd.Series\n The new ``pd.Series`` to set as data attribute for the object.\n\n \"\"\"\n self._data = data\n\n @property\n def name(self) -> str:\n \"\"\"\n Get the name property of the ``Asset`` object.\n\n Returns\n -------\n str\n The name of the ``Asset``.\n\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name: str):\n \"\"\"\n Set the value of the name property for the ``Asset`` object.\n\n Parameters\n ----------\n name : str\n The new ``str`` to set as name attribute for the object.\n\n \"\"\"\n self._name = name\n\n def as_numpy(self, dtype: np.typing.DTypeLike = np.float32) -> np.ndarray:\n \"\"\"\n Return the saved data as an numpy array. It will have the shape (n_samples, ).\n\n Parameters\n ----------\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the ``pd.Series`` data.\n\n \"\"\"\n return self._data.to_numpy().astype(dtype)" }, { "identifier": "Dataset", "path": "finq/datasets/dataset.py", "snippet": "class Dataset(object):\n \"\"\"\n A collection of ticker symbols and their historical price data. Fetches information\n and prices from Yahoo! Finance and optionally saves them to a local path for later\n use. Supports fixing missing values by interpolating ``NaN`` and verifying the\n integrity of the fetched data.\n\n Parameters\n ----------\n names : list | None\n The names of the financial assets to create a dataset with.\n symbols : list | None\n The ticker symbols corresponding to the names of the financial assets.\n market : str\n The name of the market to fetch the historical price data from.\n Defaults to ``OMX``.\n index_name : str | None\n The name of the financial index to get ticker symbols and names from.\n proxy : str | None\n The name of the proxy url to use for REST requests.\n cache_name: Path | str\n The name of the path to the file which stores the cache.\n Defaults to ``/home/.finq/http_cache``.\n n_requests : int\n The max number of requests to perform per ``t_interval``. 
Defaults to ``5``.\n t_interval : int\n The time interval (in seconds) to use with the ``CachedRateLimiter``.\n Defaults to ``1``.\n save : bool\n Wether or not to save the fetched data to a local file path.\n save_path : Path | str\n The local file path to potentially save any fetched data to.\n Defaults to ``.data/dataset/``.\n dataset_name : str\n The name of the ``Dataset`` class instance.\n separator : str\n The csv separator to use when loading and saving any ``pd.DataFrame``.\n Defaults to ``;``.\n\n \"\"\"\n\n def __init__(\n self,\n names: Optional[List[str]] = None,\n symbols: Optional[List[str]] = None,\n *,\n market: str = \"OMX\",\n index_name: Optional[str] = None,\n proxy: Optional[str] = None,\n cache_name: Union[Path, str] = default_finq_cache_path(),\n n_requests: int = 5,\n t_interval: int = 1,\n save: bool = False,\n save_path: Union[Path, str] = default_finq_save_path(),\n dataset_name: str = \"dataset\",\n separator: str = \";\",\n filter_symbols: Callable = lambda s: s,\n ) -> Optional[InvalidCombinationOfArgumentsError]:\n \"\"\" \"\"\"\n\n log.info(\n \"creating cached rate-limited session with \"\n f\"{n_requests} requests per {t_interval} seconds\"\n )\n\n # We combine a cache with rate-limiting to avoid triggering\n # Yahoo! Finance's rate-limiter that can otherwise corrupt data.\n # We specify a maximum number of requests N per X seconds.\n session = CachedRateLimiter(\n cache_name=cache_name,\n limiter=Limiter(\n RequestRate(\n n_requests,\n Duration.SECOND * t_interval,\n ),\n ),\n )\n\n if proxy:\n session.proxies.update(\n {\n \"https\": proxy,\n }\n )\n\n self._proxy = proxy\n self._session = session\n self._n_requests = n_requests\n self._t_interval = t_interval\n\n if (not names or not symbols) and isinstance(index_name, str):\n if market == \"OMX\":\n\n def filter_symbols(s):\n return s.replace(\" \", \"-\") + \".ST\"\n\n names, symbols = fetch_names_and_symbols(\n index_name,\n market=market,\n session=session,\n filter_symbols=filter_symbols,\n )\n\n if not names or not symbols:\n raise InvalidCombinationOfArgumentsError(\n \"You did not pass in a list of names and symbols, and if you \"\n \"passed in an index name to fetch, the request failed since \"\n f\"`{names=}` and `{symbols=}`. 
Did you pass in a valid index name?\"\n )\n\n if not (len(names) == len(symbols)):\n raise InvalidCombinationOfArgumentsError(\n \"Number of names does not match the number of ticker symbols, \"\n f\"{len(names)} != {len(symbols)}.\\n{names=}\\n{symbols=}\"\n )\n\n self._data = None\n self._info = None\n\n self._names = names\n self._symbols = symbols\n self._market = market\n self._index_name = index_name\n\n self._save = save\n self._save_path = Path(save_path) / dataset_name\n self._dataset_name = dataset_name\n self._separator = separator\n\n def __getitem__(self, key: str) -> Optional[pd.DataFrame]:\n \"\"\"\n Get the ``pd.DataFrame`` from the locally stored dictionary which maps ticker\n symbols to their corresponding historical price data.\n\n Parameters\n ----------\n key : str\n The dictionary key to get data for.\n\n Returns\n -------\n pd.DataFrame\n The data that is associated with the provided ticker key.\n\n \"\"\"\n return self._data.get(key, None)\n\n def __len__(self) -> int:\n \"\"\"\n Get the number of names in the dataset.\n\n Returns\n -------\n int\n The number of names.\n\n \"\"\"\n return len(self._symbols)\n\n @staticmethod\n def _save_data(data: pd.DataFrame, path: Union[Path, str], separator: str):\n \"\"\"\n Save the historical price data for a ticker to a local csv file.\n\n Parameters\n ----------\n data : pd.DataFrame\n The ``pd.DataFrame`` to save as a csv file.\n path : Path | str\n The local file name to save the csv to.\n separator : str\n The csv separator to use when saving the data. Defaults to ``;``.\n\n \"\"\"\n data.to_csv(\n path,\n sep=separator,\n header=True,\n )\n\n @staticmethod\n def _save_info(info: dict, path: Union[Path, str]):\n \"\"\"\n Save the ticker information dictionary to a local file as a ``json`` object.\n\n Parameters\n ----------\n info : dict\n The ticker information dictionary to save as a ``json`` file.\n path : Path | str\n The local file name to save the dictionary to.\n\n \"\"\"\n with open(path, \"w\") as f:\n json.dump(info, f)\n\n @staticmethod\n def _load_data(path: Union[Path, str], separator: str) -> pd.DataFrame:\n \"\"\"\n Create a new ``pd.DataFrame`` from data that is stored locally as a ``csv``.\n\n Parameters\n ----------\n path : Path | str\n The local file path to read the csv from.\n separator : str\n The separator to use for parsing the csv.\n\n Returns\n -------\n pd.DataFrame\n The data that was stored in the csv.\n\n \"\"\"\n return pd.read_csv(path, sep=separator, index_col=\"Date\")\n\n @staticmethod\n def _load_info(path: Union[Path, str]) -> dict:\n \"\"\"\n Parameters\n ----------\n path : Path | str\n The local file path to read the json object from.\n\n Returns\n -------\n dict\n A dictionary containing the information for the ticker.\n\n \"\"\"\n with open(path, \"r\") as f:\n return json.load(f)\n\n @staticmethod\n def _extract_dates_from_data(data: pd.DataFrame) -> Tuple[List, Dict]:\n \"\"\"\n Extract the ``Date`` column from a ``pd.DataFrame`` and produce a sorted list of\n unique dates for the ticker.\n\n Parameters\n ----------\n data : pd.DataFrame\n The data to extract ``Date`` column from.\n\n Returns\n -------\n tuple\n A list of the unique dates (sorted in ascending order) and a dictionary\n containing all ticker dates as key: ``str`` and value: ``list``.\n\n \"\"\"\n dates = {}\n all_dates = []\n\n for ticker, df in data.items():\n dates[ticker] = df.index.to_list()\n all_dates.extend(dates[ticker])\n\n unique_dates = sorted(list(set(all_dates)), reverse=False)\n\n return 
(unique_dates, dates)\n\n def _save_tickers_data(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers data to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_data(\n self._data[ticker],\n self._save_path / \"data\" / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n log.info(\"OK!\")\n\n def _save_tickers_info(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers info to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_info(\n self._info[ticker],\n self._save_path / \"info\" / f\"{ticker}.json\",\n )\n\n log.info(\"OK!\")\n\n def _save_data_and_info(self):\n \"\"\"\n Saves the info and data objects to a local file path.\n\n \"\"\"\n\n self._save_tickers_data()\n self._save_tickers_info()\n\n def _fetch_tickers_data(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\" \"\"\"\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} data from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n data[ticker] = yf_ticker.history(\n period=period,\n proxy=self._proxy,\n )[\n cols\n ].tz_localize(None)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def _fetch_tickers_info(self):\n \"\"\" \"\"\"\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} info from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n info[ticker] = yf_ticker.get_info(proxy=self._proxy)\n\n self._info = info\n\n def _fetch_tickers_data_and_info(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\"\n Use the `yfinance` library to fetch historical ticker data for the specified time\n period. The performance of the REST requests is highly dependent on three things:\n the config of your `CachedRateLimiter`, the amount of tickers you want to fetch,\n and the multi-threading support of your CPU.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from.\n cols : list\n The columns of the fetched ticker data to collect.\n\n \"\"\"\n\n self._fetch_tickers_data(period, cols)\n self._fetch_tickers_info()\n\n def load_local_data_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n\n path = Path(self._save_path)\n data_path = path / \"data\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not data_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {data_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? 
To do that run `dataset.fetch_data(..)`.\"\n )\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n data[ticker] = self._load_data(\n data_path / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n if not isinstance(data[ticker].index, pd.DatetimeIndex):\n data[ticker].index = pd.to_datetime(data[ticker].index)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def load_local_info_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n path = Path(self._save_path)\n info_path = path / \"info\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not info_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {info_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n info[ticker] = self._load_info(\n info_path / f\"{ticker}.json\",\n )\n\n self._info = info\n\n def load_local_files(self):\n \"\"\"\n Load the locally saved info and data files. The info is read from file as a\n ``json`` and the data is read from ``csv`` as a ``pd.DataFrame``.\n\n Raises\n ------\n DirectoryNotFoundError\n When either of the paths to the saved ``info`` and ``data`` is not a directory.\n\n \"\"\"\n\n self.load_local_data_files()\n self.load_local_info_files()\n\n def fetch_data(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\"\n Fetch the historical ticker data for the specified time period. If there exists\n locally saved files for all tickers, will try and load them instead of fetching\n from Yahoo! Finance. Saves the fetched files if ``save=True`` was specified in\n the class constructor.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``).\n cols : list\n The columns of the fetched ticker data to collect. 
Defaults to\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n Dataset\n The initialized instance of ``self`` with ticker data loaded or fetched.\n\n \"\"\"\n\n if all_tickers_data_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local data files for {self.__class__.__name__}, \"\n \"attempting local load of data files...\"\n )\n\n try:\n self.load_local_data_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local data files, attempting new fetch...\")\n\n self._fetch_tickers_data(period, cols)\n\n if self._save:\n setup_finq_save_data_path(self._save_path)\n self._save_tickers_data()\n\n return self\n\n def fetch_info(\n self,\n ) -> Dataset:\n \"\"\" \"\"\"\n\n if all_tickers_info_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local info files for {self.__class__.__name__}, \"\n \"attempting local load of info files...\"\n )\n\n try:\n self.load_local_info_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local info files, attempting new fetch...\")\n\n self._fetch_tickers_info()\n\n if self._save:\n setup_finq_save_info_path(self._save_path)\n\n return self\n\n def fetch_data_and_info(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\" \"\"\"\n self = self.fetch_data(period, cols=cols)\n self = self.fetch_info()\n return self\n\n def fix_missing_data(\n self,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n resave: bool = True,\n ) -> Dataset:\n \"\"\"\n Compares each tickers dates in their corresponding ``pd.DataFrame`` and compares\n to the known set of dates collected. If there are any missing values, will add\n the missing dates to the dataframe and then use ``df.interpolate()`` to fix them.\n Default interpolation strategy is ``linear``.\n\n Parameters\n ----------\n cols : list\n The columns of the ``pd.DataFrame`` to consider when looking for missing data\n to interpolate. 
Defaults to (``Open``, ``High``, ``Low``, ``Close``).\n resave : bool\n Whether or not to resave the data to local path after fixing missing values.\n Defaults to ``True`` but will onlyesave if there existed missing data.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n \"\"\"\n\n log.info(\"attempting to fix any missing data...\")\n\n n_missing_data = 0\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fixing ticker {ticker} potential missing values\")\n\n df = self._data[ticker]\n diff = set(self._all_dates) - set(self._dates[ticker])\n\n if diff:\n n_missing_data += 1\n\n df_missed = pd.DataFrame(index=list(diff))\n df_missed.index.name = \"Date\"\n\n df_fixed = pd.concat((df, df_missed)).sort_index(inplace=False)\n df_fixed[cols] = df_fixed[cols].interpolate()\n\n if df_fixed[df_fixed.isnull().any(axis=1)].index.values.size:\n log.error(\n f\"failed to interpolate missing prices for ticker {ticker}!\"\n )\n\n self._data[ticker] = df_fixed\n self._dates[ticker] = self._all_dates\n\n if n_missing_data and resave:\n log.info(f\"fixed {n_missing_data} tickers with missing data\")\n if self._save:\n log.info(f\"saving fixed data to {self._save_path}...\")\n self._save_tickers_data()\n\n log.info(\"OK!\")\n return self\n\n def verify_data(self) -> Union[ValueError, Dataset]:\n \"\"\"\n Tries to verify that the stored data does not contain any missing values.\n This is performed by comparing the dates in each ticker ``pd.DataFrame``\n with the known set of all fetched dates.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n Raises\n ------\n ValueError\n If there exists missing values in any stored ``pd.DataFrame``.\n\n \"\"\"\n\n log.info(\"verifying that stored data has no missing values...\")\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Verifying ticker {ticker} data\")\n\n diff = set(self._all_dates) - set(self._dates[ticker])\n if diff:\n raise ValueError(\n f\"There is a difference in dates for symbol {ticker}, have you \"\n \"tried fixing missing values prior to verifying? To do that, run \"\n \"dataset.fix_missing_data() with your initialized Dataset class.\"\n )\n\n log.info(\"OK!\")\n return self\n\n def run(self, period: str = \"1y\") -> Dataset:\n \"\"\"\n Call the three core methods for the ``Dataset`` class which fetches data,\n tries to fix missing values, and lastly verifies that there is no missing data.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``). 
Defaults to ``1y``.\n\n Returns\n -------\n Dataset\n The intialized instance of ``self``.\n\n \"\"\"\n return self.fetch_data(period).fix_missing_data().verify_data()\n\n def visualize_ticker(\n self,\n ticker: str,\n **kwargs: Dict[str, Any],\n ):\n \"\"\" \"\"\"\n\n if kwargs.get(\"title\", None) is None:\n kwargs[\"title\"] = f\"{ticker} historical OHLC prices [{self._market}]\"\n\n mpf.plot(\n self._data[ticker],\n **kwargs,\n )\n\n def visualize(\n self,\n *,\n title: str = \"Historical stock data\",\n xlabel: str = \"Dates\",\n ylabel: str = \"Closing price [$]\",\n ticks_rotation: int = 70,\n legend_loc: str = \"best\",\n log_scale: bool = False,\n save_path: Optional[str] = None,\n price_type: str = \"Close\",\n show: bool = True,\n block: bool = True,\n ):\n \"\"\"\n Plot the historical ticker price data over time.\n\n Parameters\n ----------\n title : str\n The header title to set on the generated plot.\n xlabel : str\n The label to use for the x-axis.\n ylabel : str\n The label to use for the y-axis.\n ticks_rotation : int\n The amount of degrees to rotate the x-axis ticks with. Defaults to ``70``.\n legend_loc : str\n The location of the legend. Some possible values are (``best``, ``center``,\n ``upper left``, ``upper right``, ``lower left``, ``lower right``).\n Defaults to ``best``.\n log_scale : bool\n ``True`` if the historical data should be log scaled, otherwise ``False``.\n save_path : str | None\n The local file to save the generated plot to. Does not save the plot if\n the argument is ``None``.\n price_type : str\n The price type of the historical data to plot. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n show : bool\n ``True`` if the generated plot should be shown on the screen, otherwise\n ``False``. Defaults to ``True``.\n block : bool\n Whether to wait for all figures to be closed before returning. When ``False``\n the figure windows will be displayed and returned immediately. Defaults to\n ``True``.\n\n \"\"\"\n\n for ticker, data in self._data.items():\n plt.plot(\n np.log(data[price_type]) if log_scale else data[price_type],\n label=ticker,\n )\n\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xticks(rotation=ticks_rotation)\n plt.legend(loc=legend_loc)\n\n if save_path:\n log.info(f\"saving plot to path {save_path}\")\n plt.savefig(save_path)\n log.info(\"OK!\")\n\n if show:\n plt.show(block=block)\n plt.close()\n\n def get_tickers(self) -> List[str]:\n \"\"\"\n Return the saved list of ticker symbols.\n\n Returns\n -------\n list\n A list of ``str`` ticker symbols.\n\n \"\"\"\n return self._symbols\n\n def get_data(self) -> Dict[str, pd.DataFrame]:\n \"\"\"\n Return the saved dictionary which maps ticker symbols to their\n corresponding historical data with the following columns:\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n dict\n A dictionary with key: ``str`` and value: ``pd.DataFrame``.\n\n \"\"\"\n return self._data\n\n def as_assets(self, price_type: str = \"Close\") -> Dict[str, Asset]:\n \"\"\"\n Create a list of Assets for each ticker and specified price type.\n\n Parameters\n ----------\n price_type : str\n The price type data to create an ``Asset`` object with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). 
Defaults to ``Close``.\n\n Returns\n -------\n dict\n A dictionary of newly created ``Asset`` objects with ticker symbols as keys.\n\n \"\"\"\n return {\n ticker: Asset(\n self._data[ticker][price_type],\n self._names[i],\n market=self._market,\n index_name=self._index_name,\n price_type=price_type,\n pre_compute=False,\n )\n for i, ticker in enumerate(self._symbols)\n }\n\n def as_df(self, price_type: str = \"Close\") -> pd.DataFrame:\n \"\"\"\n Create an aggregated ``pd.DataFrame`` for the specified price type.\n It will have the shape (n_samples, n_tickers).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``pd.DataFrame`` object with. Has to\n be one of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n\n Returns\n -------\n pd.DataFrame\n A new ``pd.DataFrame`` with ticker names as columns.\n\n \"\"\"\n\n return pd.DataFrame(\n {t: d[price_type] for t, d in zip(self._symbols, self._data.values())},\n index=self._all_dates,\n )\n\n def as_numpy(\n self,\n price_type: str = \"Close\",\n *,\n dtype: np.typing.DTypeLike = np.float32,\n ) -> np.ndarray:\n \"\"\"\n Extract the specified price type from stored data as np.ndarray.\n It will have the shape (n_tickers, n_samples).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``np.ndarray`` with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the specified price type and dtype.\n\n \"\"\"\n return np.array(\n [d[price_type].to_numpy().astype(dtype) for d in self._data.values()]\n )" }, { "identifier": "FinqError", "path": "finq/exceptions.py", "snippet": "class FinqError(Exception):\n \"\"\" \"\"\"" }, { "identifier": "InvalidCombinationOfArgumentsError", "path": "finq/exceptions.py", "snippet": "class InvalidCombinationOfArgumentsError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "InvalidPortfolioWeightsError", "path": "finq/exceptions.py", "snippet": "class InvalidPortfolioWeightsError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "ObjectiveFunctionError", "path": "finq/exceptions.py", "snippet": "class ObjectiveFunctionError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "PortfolioNotYetOptimizedError", "path": "finq/exceptions.py", "snippet": "class PortfolioNotYetOptimizedError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "period_returns", "path": "finq/formulas.py", "snippet": "def period_returns(x: np.ndarray, period: int = 1) -> np.ndarray:\n \"\"\" \"\"\"\n\n return (x[:, period:] / x[:, :-period]) - 1" }, { "identifier": "sharpe_ratio", "path": "finq/formulas.py", "snippet": "def sharpe_ratio(\n r: Union[float, np.ndarray],\n v: Union[float, np.ndarray],\n rfr: float,\n) -> Union[float, np.ndarray]:\n \"\"\" \"\"\"\n\n return (r - rfr) / v" }, { "identifier": "weighted_returns", "path": "finq/formulas.py", "snippet": "def weighted_returns(w: np.ndarray, r: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, r)" }, { "identifier": "weighted_variance", "path": "finq/formulas.py", "snippet": "def weighted_variance(w: np.ndarray, cov: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, np.dot(cov, w.T))" } ]
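To make the formula snippets just shown (`period_returns`, `weighted_returns`, `weighted_variance`, `sharpe_ratio`) concrete, here is a minimal NumPy sketch on fabricated prices; the three-asset price matrix, the 0.5/0.3/0.2 weights, and the daily risk-free-rate convention are illustrative assumptions, not values taken from the repository.

import numpy as np

rng = np.random.default_rng(0)
# fabricated prices with shape (n_assets, n_samples), as the formulas expect
prices = 100.0 * np.cumprod(1.0 + 0.001 * rng.standard_normal((3, 500)), axis=1)

returns = prices[:, 1:] / prices[:, :-1] - 1.0       # period_returns(prices, period=1)
mean_r = returns.mean(axis=1)                        # mean daily return per asset
cov = np.cov(returns, rowvar=True)                   # daily covariance matrix, shape (3, 3)

w = np.array([0.5, 0.3, 0.2])                        # normalized portfolio weights (assumed)
port_return = np.dot(w, mean_r)                      # weighted_returns(w, r)
port_variance = np.dot(w, np.dot(cov, w.T))          # weighted_variance(w, cov)
port_volatility = np.sqrt(port_variance)

risk_free_rate = 5e-3 / 252                          # assumed daily-scale risk-free rate
sharpe = (port_return - risk_free_rate) / port_volatility   # sharpe_ratio(r, v, rfr)

print(f"return={port_return:.6f} volatility={port_volatility:.6f} sharpe={sharpe:.3f}")

The volatility here is simply the square root of the weighted variance, which mirrors how the `Portfolio.volatility` method later in this record is built on `weighted_variance`.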
import logging import pandas as pd import numpy as np import scipy.optimize as scipyopt import matplotlib.pyplot as plt from functools import wraps from tqdm import tqdm from finq.asset import Asset from finq.datasets import Dataset from finq.exceptions import ( FinqError, InvalidCombinationOfArgumentsError, InvalidPortfolioWeightsError, ObjectiveFunctionError, PortfolioNotYetOptimizedError, ) from finq.formulas import ( period_returns, sharpe_ratio, weighted_returns, weighted_variance, ) from typing import ( Any, Callable, List, Dict, Tuple, Union, Optional, )
10,835
def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """
""" MIT License Copyright (c) 2023 Wilhelm Ågren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. File created: 2023-10-20 Last updated: 2023-11-10 """ log = logging.getLogger(__name__) class Portfolio(object): """ """ # For a full list of `scipy` optimization methods and references, see the link below. # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html _supported_optimization_methods = ( "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ) _weight_initializations = { "lognormal": np.random.lognormal, "normal": np.random.normal, "uniform": np.random.uniform, } def __init__( self, data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame], *, weights: Optional[np.ndarray] = None, names: Optional[Union[Dict[str, str], List[str]]] = None, symbols: Optional[Union[Dict[str, str], List[str]]] = None, confidence_level: float = 0.95, risk_free_rate: float = 5e-3, n_trading_days: int = 252, objective_function: Optional[Callable] = None, objective_function_args: Tuple[Any, ...] = (), objective_bounds: Optional[List[Tuple[int, ...]]] = None, objective_constraints: Optional[Tuple[Dict, ...]] = None, ): """ """ if isinstance(data, Dataset): assets = data.as_assets() data = list(assets.values()) symbols = list(assets.keys()) if not isinstance(data, list): if names is None and symbols is None and not isinstance(data, pd.DataFrame): raise InvalidCombinationOfArgumentsError( "You need to provide the names and ticker symbols of each asset that you " "want to include in your portfolio if the data you provided is neither a " "`list` of `Asset` objects or a `pd.DataFrame`. You can also try " "providing only one of the arguments `names` and `symbols`, but then as " "a dictionary of the form `key=name` `value=symbol`." 
) if isinstance(data, list): symbols = [a.name for a in data] data = np.array([a.data for a in data]) if isinstance(data, pd.DataFrame): symbols = data.columns data = data.to_numpy().T if isinstance(names, dict): symbols = list(names.values()) names = list(names.keys()) if isinstance(symbols, dict): names = list(symbols.keys()) symbols = list(symbols.values()) self._data = data self._weights = weights self._names = names self._symbols = symbols self._confidence_level = confidence_level self._risk_free_rate = risk_free_rate self._n_trading_days = n_trading_days self._random_portfolios = None self._objective_function = objective_function self._objective_function_args = objective_function_args self._objective_bounds = objective_bounds self._objective_constraints = objective_constraints def weights_are_normalized(self) -> bool: """ """ return np.allclose(self._weights.sum(), 1.0, rtol=1e-6) def initialize_random_weights( self, distribution: Union[str, Callable], *args: Tuple[Any, ...], **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) weights = distribution(*args, **kwargs) self._weights = weights / weights.sum() def check_valid_weights(func) -> Callable: """ """ @wraps(func) def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]: """ """ if self._weights is None: raise PortfolioNotYetOptimizedError( "Portfolio weights are `None`. Perhaps you have not yet optimized it? " ) if not self.weights_are_normalized(): raise InvalidPortfolioWeightsError( "Your portfolio weights are not normalized. Make sure to normalize them " "(they sum to one) before calculating any analytical quantities. 
" ) return func(self, *args, **kwargs) return _check_valid_weights def daily_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=1) def yearly_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=self._n_trading_days) def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """
return weighted_returns(self._weights.T, self.daily_returns_mean())
9
2023-10-09 19:02:54+00:00
16k
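The `Dataset.fix_missing_data` method shown in this record pads each ticker's frame with the dates it is missing and then interpolates the resulting gaps (linear interpolation by default, as its docstring states). The following hedged pandas sketch reproduces just that idea on fabricated data; the dates, prices, and the reduced `Open`/`Close` column set are made up for illustration.

import pandas as pd

all_dates = pd.to_datetime(["2023-01-02", "2023-01-03", "2023-01-04", "2023-01-05"])
ticker_df = pd.DataFrame(
    {"Open": [10.0, 10.4, 10.8], "Close": [10.2, 10.6, 11.0]},
    index=pd.to_datetime(["2023-01-02", "2023-01-03", "2023-01-05"]),
)
ticker_df.index.name = "Date"

# pad the frame with the dates it is missing, then interpolate the gaps
missing = all_dates.difference(ticker_df.index)
padded = pd.concat([ticker_df, pd.DataFrame(index=missing)]).sort_index()
padded[["Open", "Close"]] = padded[["Open", "Close"]].interpolate()

print(padded)  # 2023-01-04 now holds the midpoint of its neighbours
assert not padded.isnull().any().any()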
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sampling_helpers.py", "snippet": "def get_model(cfg_path=\"configs/latent-diffusion/cin256-v2.yaml\", ckpt_path=\"models/ldm/cin256-v2/model.ckpt\"):\n config = OmegaConf.load(cfg_path)\n model = load_model_from_config(config, ckpt_path)\n return model" }, { "identifier": "_unmap_img", "path": "sampling_helpers.py", "snippet": "def _unmap_img(x, from_image_net_dist=False):\n \"\"\"\n from 0 to 1 to -1 to 1\n \"\"\"\n\n return 2. * x - 1" }, { "identifier": "generate_samples", "path": "sampling_helpers.py", "snippet": "def generate_samples(\n model, \n sampler, \n target_y, \n ddim_steps, \n scale, \n init_image=None, \n t_enc=None,\n init_latent=None, \n ccdddim=False, \n ddim_eta=0., \n latent_t_0=True, \n prompts: list = None,\n seed: int = 0\n):\n torch.cuda.empty_cache()\n \n all_samples = []\n all_probs = []\n all_videos = []\n all_masks = []\n all_cgs = []\n\n with torch.no_grad():\n with model.ema_scope():\n tic = time.time()\n print(f\"rendering target classes '{target_y}' in {len(sampler.ddim_timesteps)} or {ddim_steps} steps and using s={scale:.2f}.\")\n batch_size = target_y.shape[0]\n if \"class_label\" == model.cond_stage_key: # class-conditional\n uc = model.get_learned_conditioning({model.cond_stage_key: torch.tensor(batch_size * [1000]).to(model.device)})\n c = model.get_learned_conditioning({model.cond_stage_key: target_y.to(model.device)})\n elif \"txt\" == model.cond_stage_key: # text-conditional\n uc = model.get_learned_conditioning(batch_size * [\"\"])\n if prompts is None:\n raise ValueError(\"Prompts are not defined!\")\n c = model.get_learned_conditioning(prompts)\n else:\n raise NotImplementedError\n \n if init_latent is not None:\n if seed!=-1:\n noises_per_batch = []\n for b in range(batch_size):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n noises_per_batch.append(torch.randn_like(init_latent[b]))\n noise = torch.stack(noises_per_batch, dim=0)\n else:\n noise = None\n z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * (batch_size)).to(\n init_latent.device), noise=noise) if not latent_t_0 else init_latent\n\n if seed!=-1:\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # decode it\n if ccdddim:\n out = sampler.decode(\n z_enc, \n c, \n t_enc, \n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc, \n y=target_y.to(model.device), \n latent_t_0=latent_t_0,\n )\n samples = out[\"x_dec\"]\n prob = out[\"prob\"]\n vid = out[\"video\"]\n mask = out[\"mask\"]\n cg = out[\"concensus_regions\"]\n\n else:\n samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=scale,\n unconditional_conditioning=uc)\n\n x_samples = model.decode_first_stage(samples)\n x_samples_ddim = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n cat_samples = x_samples_ddim #torch.cat([init_image[:1], x_samples_ddim], dim=0)\n else:\n\n samples_ddim, _ = sampler.sample(S=ddim_steps,\n conditioning=c,\n batch_size=batch_size,\n shape=[3, 64, 64],\n verbose=False,\n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc,\n eta=ddim_eta)\n\n x_samples_ddim = model.decode_first_stage(samples_ddim)\n x_samples_ddim = 
torch.clamp((x_samples_ddim + 1.0) / 2.0,\n min=0.0, max=1.0)\n cat_samples = x_samples_ddim\n\n all_samples.append(cat_samples)\n all_probs.append(prob) if ccdddim and prob is not None else None\n all_videos.append(vid) if ccdddim and vid is not None else None\n all_masks.append(mask) if ccdddim and mask is not None else None\n all_cgs.append(cg) if ccdddim and cg is not None else None\n tac = time.time()\n\n out = {}\n out[\"samples\"] = all_samples\n out[\"probs\"] = all_probs if len(all_probs) > 0 else None\n out[\"videos\"] = all_videos if len(all_videos) > 0 else None\n out[\"masks\"] = all_masks if len(all_masks) > 0 else None\n out[\"cgs\"] = all_cgs if len(all_cgs) > 0 else None\n \n return out" }, { "identifier": "load_model_hf", "path": "sampling_helpers.py", "snippet": "def load_model_hf(repo_id, filename, dir, ckpt_config_filename, device='cpu'):\n cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)\n\n args = SLConfig.fromfile(cache_config_file)\n args.device = device\n model = build_model(args)\n\n cache_file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=dir)\n checkpoint = torch.load(cache_file, map_location='cpu')\n log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)\n print(\"Model loaded from {} \\n => {}\".format(cache_file, log))\n _ = model.eval()\n return model.to(device)" }, { "identifier": "CCMDDIMSampler", "path": "ldm/models/diffusion/cc_ddim.py", "snippet": "class CCMDDIMSampler(object):\n def __init__(self, model, classifier, model_type=\"latent\", schedule=\"linear\", guidance=\"free\", lp_custom=False,\n deg_cone_projection=10., denoise_dist_input=True, classifier_lambda=1, dist_lambda=0.15,\n enforce_same_norms=True, seg_model=None, detect_model=None, masked_guidance=False,\n backprop_diffusion=True, log_backprop_gradients: bool = False, mask_alpha = 5., cone_projection_type= 'default', self_recurrence=0, classifier_wrapper: bool = True, record_intermediate_results:bool=False, verbose:bool=True,**kwargs):\n\n super().__init__()\n self.model_type = model_type\n self.lp_custom = lp_custom\n self.images = []\n self.probs = []\n self.classifier_lambda = classifier_lambda\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.classifier = classifier\n self.guidance = guidance\n self.backprop_diffusion = backprop_diffusion\n self.log_backprop_gradients = log_backprop_gradients\n # self.projected_counterfactuals = projected_counterfactuals\n self.deg_cone_projection = deg_cone_projection\n self.cone_projection_type = cone_projection_type\n self.denoise_dist_input = denoise_dist_input\n self.dist_lambda = dist_lambda\n self.enforce_same_norms = enforce_same_norms\n self.seg_model = seg_model\n self.masked_guidance = masked_guidance\n self.mask_alpha = mask_alpha\n self.self_recurrence = self_recurrence\n self.classifier_wrapper = classifier_wrapper\n self.record_intermediate_results = record_intermediate_results\n self.verbose = verbose\n\n self.init_images = None\n self.init_labels = None \n self.mask = None\n self.concensus_regions = []\n \n self.detect_model = detect_model\n self.classification_criterion = torch.nn.CrossEntropyLoss()\n self.binary_classification_criterion = torch.nn.BCEWithLogitsLoss()\n \n self.dino_pipeline = False\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n self.distance_criterion = DinoLoss(dino=torch.hub.load('facebookresearch/dino:main', 'dino_vitb16').eval(), 
loss_identifier=self.lp_custom.split(\"_\")[-1])\n self.dino_init_features = None\n self.dino_pipeline = True\n elif isinstance(self.lp_custom, int):\n if self.lp_custom == 1:\n self.distance_criterion = torch.nn.L1Loss(reduction='sum')\n elif self.lp_custom == 2:\n self.distance_criterion = torch.nn.MSELoss(reduction='sum')\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n def get_classifier_dist(self, x, t=None):\n \"\"\"\n Create a distribution over the classifier output space\n Args:\n x: input image for which to create the distribution over the classifier output space range [-1, 1]\n\n Returns:\n dist: torch distribution over the classifier output space\n\n \"\"\"\n x = tf.center_crop(x, 224)\n x = normalize(_map_img(x))\n logit = self.classifier(x) # (TODO) add option for t here\n dist = torchd.independent.Independent(OneHotDist(logit, validate_args = False), 0) # 0 here is the batch dimension, so event_shape is (num_classes, )\n return dist\n\n def get_classifier_logits(self, x, t=None):\n \"\"\"\n Returns classifier logits\n Args:\n x: input image for which to create the prediction\n\n Returns:\n logits: logits of output layer of target model\n\n \"\"\"\n x = _map_img(x)\n if not self.classifier_wrapper: # only works for ImageNet!\n x = tf.center_crop(x, 224)\n x = normalize(x)\n return self.classifier(x)\n\n def get_dino_features(self, x, device):\n x = normalize(_map_img(tf.center_crop(x, output_size=224)))\n return self.distance_criterion.dino(x.to(device))\n\n def get_mask_clip_seg(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n if self.mask is not None:\n return self.mask\n\n prompts = []\n\n for l in self.init_labels:\n prompts.append(re.sub(r'\\b(\\w)', lambda m: m.group(1).upper(), i2h[l]))\n\n with torch.no_grad():\n img_to_seg = F.interpolate(normalize(self.init_images), size=(352, 352), mode='bilinear',\n align_corners=False).to(self.init_images.device)\n preds = self.seg_model(img_to_seg, prompts)[0]\n preds = F.interpolate(preds, size=self.init_images.shape[-2:], mode='bilinear', align_corners=False)\n preds = torch.sigmoid(preds) # torch.softmax(preds.view(preds.shape[0], -1), dim=1).view(*preds.shape)\n # penalty = 1-preds\n preds = (preds - preds.min()) / (preds.max() - preds.min())\n preds = torch.sigmoid(self.mask_alpha*2*(preds-0.5))\n self.mask = preds.to(self.init_images.device)\n return self.mask\n\n def get_mask(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n\n if self.mask is not None:\n return self.mask\n\n with torch.no_grad():\n print(\"input range\", self.init_images.min(), self.init_images.max())\n image_int8 = (self.init_images[0].permute(1, 2, 0).cpu().numpy() * 255.).astype(np.uint8)\n # detected_boxes = detect(image, text_prompt=i2h[label], model=groundingdino_model, image_source=image_image)\n detected_boxes = detect(normalize(self.init_images[0]).squeeze(),\n text_prompt=i2h[self.init_labels[0]].split(',')[0],\n model=self.detect_model) # , image_source=image_int8)\n segmented_frame_masks = segment(image_int8, self.seg_model, boxes=detected_boxes)\n preds = torch.any(segmented_frame_masks, dim=0)\n preds = preds.unsqueeze(0).repeat(self.init_images.shape[0], *(1,) * len(preds.shape))\n # print(\"preds range after first seg \", preds.min(), preds.max())\n self.mask = 
preds.to(self.init_images.device)\n\n return self.mask\n\n def get_output(self, x, t, c, index, unconditional_conditioning, use_original_steps=True, quantize_denoised=True,\n return_decoded=False, return_pred_latent_x0=False):\n b, device = x.shape[0], x.device\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n with torch.enable_grad() if self.backprop_diffusion else torch.no_grad():\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n\n if return_decoded:\n # getting the original denoised image\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n # current prediction for x_0\n # get the original image with range [0, 1] if it is in latent space\n pred_latent_x0 = (x - sqrt_one_minus_at * e_t_uncond) / a_t.sqrt() # e_t - > e_t_uncond\n if quantize_denoised:\n pred_latent_x0, _, *_ = self.model.first_stage_model.quantize(pred_latent_x0)\n\n pred_x0 = self.model.differentiable_decode_first_stage(\n pred_latent_x0) # if self.model_type == \"latent\" else pred_latent_x0\n # pred_x0 = torch.clamp((pred_x0 + 1.0) / 2.0, min=0.0, max=1.0)\n \n if return_pred_latent_x0:\n return e_t_uncond, e_t, pred_x0, pred_latent_x0\n else:\n return e_t_uncond, e_t, pred_x0\n else:\n return e_t_uncond, e_t\n\n def conditional_score(self, x, t, c, index, use_original_steps, quantize_denoised, unconditional_guidance_scale=1.,\n unconditional_conditioning=None, y=None):\n \"\"\"\n\n Args:\n x: input image\n t: time step\n c: conditioning\n index: index for the schedule\n use_original_steps: whether to use the original steps\n quantize_denoised: whether to quantize the denoised image\n unconditional_guidance_scale: scale for the unconditional guidance\n unconditional_conditioning: unconditional conditioning\n y: target class\n\n\n Returns:\n e_t: score after conditioning\n\n \"\"\"\n b, *_, device = *x.shape, x.device\n x = x.detach() # .requires_grad_()\n # x.requires_grad = True\n prob_best_class = None\n mask_guidance = None\n\n ## check if gradient tracking is on for x\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n if self.guidance == \"free\":\n e_t_uncond, e_t, pred_x0 = self.get_output(x, t, c, index, unconditional_conditioning, use_original_steps,\n quantize_denoised, return_decoded=True)\n\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n score_out = torch.zeros_like(x)\n\n with torch.enable_grad():\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n\n with torch.no_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom: # retain_graph causes cuda oom issues for dino distance regularizer...\n with torch.enable_grad():\n 
pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.dino_init_features.to(x.device).detach())\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=False)[0]\n elif self.lp_custom:\n with torch.enable_grad():\n pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.init_images.to(x.device))\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=True)[0]\n \n if self.classifier_lambda != 0:\n with torch.enable_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n pred_logits = self.get_classifier_logits(pred_x0)\n if len(pred_logits.shape) == 2: # multi-class\n log_probs = torch.nn.functional.log_softmax(pred_logits, dim=-1)\n log_probs = log_probs[range(log_probs.size(0)), y.view(-1)]\n prob_best_class = torch.exp(log_probs).detach()\n else: # binary\n loss = self.binary_classification_criterion(pred_logits, y)\n loss *= -1 # minimize this\n log_probs = loss\n prob_best_class = pred_logits.sigmoid().detach()\n\n if self.log_backprop_gradients: pred_latent_x0.retain_grad()\n\n if self.dino_pipeline:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=False)[0]\n else:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier2 = torch.autograd.grad(log_probs[0].sum(), x_noise, retain_graph=False)[0]\n\n if self.log_backprop_gradients:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_t_sqrt = a_t.sqrt()\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n grad_pred_latent_x0 = pred_latent_x0.grad.data\n grad_unet_wrt_zt = (grad_classifier*a_t_sqrt/grad_pred_latent_x0 - 1)*(-1/sqrt_one_minus_at)\n\n cossim = torch.nn.CosineSimilarity()\n cossim_wpre = cossim(grad_classifier.view(2, -1), grad_pred_latent_x0.view(2, -1))\n \n print(torch.norm(grad_classifier, dim=(2,3)), torch.norm(grad_pred_latent_x0, dim=(2,3)), torch.norm(grad_unet_wrt_zt, dim=(2,3)))\n print(cossim_wpre)\n\n # assert e_t_uncond.requires_grad == True and e_t.requires_grad == True, \"e_t_uncond and e_t should require gradients\"\n\n # if self.guidance == \"projected\":\n implicit_classifier_score = (e_t - e_t_uncond) # .detach()\n # check gradient tracking on implicit_classifier_score\n assert implicit_classifier_score.requires_grad == False, \"implicit_classifier_score requires grad\"\n\n if self.lp_custom or self.classifier_lambda != 0:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n\n if self.classifier_lambda != 0:\n classifier_score = -1 * grad_classifier * (1 - a_t).sqrt()\n assert classifier_score.requires_grad == False, 
\"classifier_score requires grad\"\n # project the gradient of the classifier on the implicit classifier\n\n\n projection_fn = cone_project if self.cone_projection_type == \"default\" else cone_project_chuncked\n projection_fn = cone_project_chuncked_zero if \"zero\" in self.cone_projection_type else projection_fn\n \n \n proj_out = projection_fn(implicit_classifier_score.view(x.shape[0], -1),\n classifier_score.view(x.shape[0], -1),\n self.deg_cone_projection,\n orig_shp=implicit_classifier_score.shape) \\\n if self.guidance == \"projected\" else classifier_score\n \n classifier_score = proj_out if self.cone_projection_type == \"default\" else proj_out[0].view_as(classifier_score)\n concensus_region = proj_out[1].unsqueeze(1) if self.cone_projection_type == \"binning\" else None\n #print(classifier_score.shape, concensus_region.shape)\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(classifier_score,\n implicit_classifier_score) # e_t_uncond (AWAREE!!)\n classifier_score = self.classifier_lambda * score_\n\n else:\n classifier_score *= self.classifier_lambda\n\n score_out += classifier_score\n\n # distance gradients\n if self.lp_custom:\n\n lp_score = -1 * lp_grad * (1 - a_t).sqrt()\n\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(lp_score,\n implicit_classifier_score)\n lp_score = self.dist_lambda * score_\n\n else:\n\n lp_score *= self.dist_lambda\n\n score_out -= lp_score\n\n e_t = e_t_uncond + unconditional_guidance_scale * score_out # (1 - a_t).sqrt() * grad_out\n\n \n if self.record_intermediate_results:\n # adding images to create a gif\n pred_x0_copy = pred_x0.clone().detach()\n img = torch.clamp(_map_img(pred_x0_copy), min=0.0, max=1.0)\n #img = torch.permute(img, (1, 2, 0, 3)).reshape((img.shape[1], img.shape[2], -1))\n\n self.images.append(img.detach().cpu())\n if self.classifier_lambda != 0 and self.cone_projection_type == \"binning\":\n self.concensus_regions.append(concensus_region.detach().cpu())\n \n if prob_best_class is not None:\n self.probs.append(prob_best_class.detach().cpu())\n\n return e_t\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n #pass\n # TODO: this is a hack to make it work on CPU\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)\n #print(\"DDIM timesteps: \", self.ddim_timesteps, \"with length: \", len(self.ddim_timesteps))\n #print all input parameters\n #print(\"DDIM parameters: \", self.ddim_timesteps, ddim_discretize, ddim_eta)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta, verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, ):\n\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 
'pred_x0': [img]}\n time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, y=None):\n b, *_, device = *x.shape, x.device\n\n e_t = self.conditional_score(x=x, c=c, t=t, index=index, use_original_steps=use_original_steps,\n quantize_denoised=quantize_denoised,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas).to(x0.device)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas.to(x0.device)\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, y=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, latent_t_0=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n if self.masked_guidance:\n print(\"### Getting the mask ###\")\n mask = self.get_mask()\n mask = F.interpolate(mask.to(torch.uint8), size=x_latent.shape[-2:])\n # mask = self.get_mask()\n # mask = F.interpolate(mask, size=x_latent.shape[-2:], mode='bilinear', align_corners=True)\n # mask = (mask - mask.min()) / (mask.max() - mask.min())\n # mask[mask < 0.5] = 0.\n # mask[mask >= 0.5] = 1.\n\n if self.verbose:\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n else:\n iterator = range(time_range)\n\n # if latent_t_0:\n # x_orig = x_latent\n # x_dec = self.stochastic_encode(x_latent.clone(),\n # torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n # else:\n x_dec = x_latent if not latent_t_0 else self.stochastic_encode(x_latent.clone(), torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n for i, step in enumerate(iterator):\n tic = time.time()\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n\n if self.masked_guidance and latent_t_0:\n #print(\"blending with original image\")\n img_orig = self.model.q_sample(x_latent.clone(), ts)\n x_dec = img_orig * (1. 
- mask) + (mask) * x_dec\n\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n x_dec = x_dec.detach()\n for j in range(self.self_recurrence):\n print(\"self recurrence\")\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale = 1)\n\n #workaround for long running time\n elapsed_time = time.time() - tic\n if elapsed_time > 6:\n print(f\"Iteration time {elapsed_time} exceeded limit 6 secs, terminating program...\")\n print(\"x_dec device: \", x_dec.device)\n sys.exit(1) # Terminate the program with exit code 1 (indicating an error) \n \n out = {}\n out['x_dec'] = x_dec\n out['video'] = torch.stack(self.images, dim=1) if len(self.images) != 0 else None\n out[\"mask\"] = self.mask.to(torch.float32) if self.mask is not None else None\n # print(f\"Video shape: {out['video'].shape}\")\n #out['prob'] = self.probs[-1].item() if len(self.probs) != 0 else None\n out['prob'] = self.probs[-1].detach().cpu().numpy() if len(self.probs) != 0 else None\n out['concensus_regions'] = torch.stack(self.concensus_regions, dim=1) if len(self.concensus_regions) != 0 else None\n #print(out['concensus_regions'].shape, (out[\"concensus_regions\"]>200).to(torch.float32).mean())\n self.images = []\n self.probs = []\n \n self.concensus_regions = []\n self.mask = None\n\n return out" }, { "identifier": "name_map", "path": "data/imagenet_classnames.py", "snippet": "" }, { "identifier": "DecisionDensenetModel", "path": "utils/DecisionDensenetModel.py", "snippet": "class DecisionDensenetModel(nn.Module):\n\n def __init__(self, num_classes=40, pretrained=False, query_label=-1):\n super().__init__()\n self.feat_extract = DenseNet121(pretrained=pretrained)\n self.classifier = nn.Linear(self.feat_extract.output_size, num_classes)\n self.query_label = query_label\n\n def forward(self, x, before_sigmoid=True):\n\n x = self.feat_extract(x)\n x = self.classifier(x)\n if not before_sigmoid:\n x = torch.sigmoid(x)\n return x[:, self.query_label]" }, { "identifier": "Normalizer", "path": "utils/preprocessor.py", "snippet": "class Normalizer(torch.nn.Module):\n '''\n normalizing module. Useful for computing the gradient\n to a x image (x in [0, 1]) when using a classifier with\n different normalization inputs (i.e. 
f((x - mu) / sigma))\n '''\n def __init__(self, classifier,\n mu=[0.485, 0.456, 0.406],\n sigma=[0.229, 0.224, 0.225]):\n super().__init__()\n self.classifier = classifier\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "CropAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class CropAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224, mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n # x = F.center_crop(x, self.crop_size)\n x = self.center_crop(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "ResizeAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class ResizeAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, resolution: tuple=(224, 224), mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.resolution = resolution\n self.resize = torchvision.transforms.Resize(resolution)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.resize(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "GenericPreprocessing", "path": "utils/preprocessor.py", "snippet": "class GenericPreprocessing(torch.nn.Module):\n def __init__(self, classifier, preprocessor) -> None:\n super().__init__()\n self.classifier = classifier\n self.preprocessor = preprocessor\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.preprocessor(x)\n return self.classifier(x)" }, { "identifier": "Crop", "path": "utils/preprocessor.py", "snippet": "class Crop(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.center_crop(x)\n return self.classifier(x)" }, { "identifier": "VisionLanguageWrapper", "path": "utils/vision_language_wrapper.py", "snippet": "class VisionLanguageWrapper(nn.Module):\n def __init__(self, model, tokenizer, prompts) -> None:\n super().__init__()\n self.model = model\n self.tokenizer = tokenizer\n self.prompts = prompts\n\n device = next(self.model.parameters()).device\n\n text = tokenizer(prompts)\n with torch.no_grad():\n self.text_features = model.encode_text(text.to(device))\n self.text_features = self.text_features / self.text_features.norm(dim=-1, keepdim=True)\n\n def forward(self, x):\n image_features = self.model.encode_image(x)\n image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n logits = 100.0 * image_features @ self.text_features.T\n return logits" }, { "identifier": "MadryNet", "path": "utils/madry_net.py", "snippet": "def MadryNet(ckpt, device):\n norm = \"l2\"\n model = load_model(\n modelname=\"Engstrom2019Robustness\", norm=norm, device=device\n )\n 
state_dict = torch.load(ckpt, map_location=\"cpu\")\n model.model.load_state_dict(state_dict, strict=True)\n return model" }, { "identifier": "LinearClassifier", "path": "utils/dino_linear.py", "snippet": "class LinearClassifier(nn.Module):\n \"\"\"Linear layer to train on top of frozen features\"\"\"\n def __init__(self, dim, num_labels=1000):\n super(LinearClassifier, self).__init__()\n self.num_labels = num_labels\n self.linear = nn.Linear(dim, num_labels)\n self.linear.weight.data.normal_(mean=0.0, std=0.01)\n self.linear.bias.data.zero_()\n\n def forward(self, x):\n # flatten\n x = x.view(x.size(0), -1)\n\n # linear layer\n return self.linear(x)" }, { "identifier": "DINOLinear", "path": "utils/dino_linear.py", "snippet": "class DINOLinear(nn.Module):\n def __init__(self, dino, linear_classifier) -> None:\n super().__init__()\n self.dino = dino\n self.linear = linear_classifier\n \n def forward(self, x):\n x = self.dino(x)\n return self.linear(x)" } ]
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
13,341
classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! 
starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval()
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device) classifier_model = DINOLinear(dino, linear_classifier) transforms_list = [transforms.CenterCrop(224), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))] classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) elif "OxfordIIIPets" in cfg.data._target_: # zero-shot OpenClip: https://arxiv.org/pdf/2212.07143.pdf model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') model = model.to(device).eval() tokenizer = open_clip.get_tokenizer('ViT-B-32') # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) prompts = [f"a photo of a {label}, a type of pet." 
for label in pets_idx_to_classname.values()] classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! 
starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval()
classifier_model.train = disabled_train
0
2023-10-10 09:40:10+00:00
16k
cpuimage/minSDXLTF
stable_diffusion_xl/stable_diffusion_xl.py
[ { "identifier": "SimpleTokenizer", "path": "stable_diffusion_xl/clip_tokenizer.py", "snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/CLIP/blob/main/clip/bpe_simple_vocab_16e6.txt.gz?raw=true\", # noqa: E501\n file_hash=\"924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a\", # noqa: E501\n )\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split(\"\\n\")\n merges = merges[1: 49152 - 256 - 2 + 1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v + \"</w>\" for v in vocab]\n for merge in merges:\n vocab.append(\"\".join(merge))\n vocab.extend([\"<|startoftext|>\", \"<|endoftext|>\"])\n self.vocab = vocab\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n\n self.special_tokens = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.cache = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.pat = self._create_pat()\n\n def _create_encoder(self, vocab):\n return dict(zip(vocab, range(len(vocab))))\n\n def _create_decoder(self, encoder):\n return {v: k for k, v in encoder.items()}\n\n def _create_pat(self):\n return re.compile(\n \"|\".join([re.escape(key) for key in self.special_tokens.keys()])\n + r\"\"\"|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n re.IGNORECASE,\n )\n\n @property\n def end_of_text(self):\n return self.encoder[\"<|endoftext|>\"]\n\n @property\n def start_of_text(self):\n return self.encoder[\"<|startoftext|>\"]\n\n def add_tokens(self, tokens):\n if isinstance(tokens, str):\n tokens = [tokens]\n tokens_added = 0\n for token in tokens:\n if token in self.vocab:\n continue\n tokens_added += 1\n self.vocab.append(token)\n self.special_tokens[token] = token\n self.cache[token] = token\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.pat = self._create_pat()\n return tokens_added\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + (token[-1] + \"</w>\",)\n pairs = get_pairs(word)\n\n if not pairs:\n return token + \"</w>\"\n\n while True:\n bigram = min(\n pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if (word[i] == first\n and i < len(word) - 1\n and word[i + 1] == second):\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \" \".join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = \"\".join(self.byte_encoder[b] for b in token.encode(\"utf-8\"))\n bpe_tokens.extend(\n self.encoder[bpe_token]\n for bpe_token in self.bpe(token).split(\" \")\n )\n return 
[self.start_of_text] + bpe_tokens + [self.end_of_text]\n\n def decode(self, tokens):\n text = \"\".join([self.decoder[token] for token in tokens])\n text = (\n bytearray([self.byte_decoder[c] for c in text])\n .decode(\"utf-8\", errors=\"replace\")\n .replace(\"</w>\", \" \")\n )\n return text" }, { "identifier": "DiffusionXLModel", "path": "stable_diffusion_xl/diffusion_model.py", "snippet": "class DiffusionXLModel(tf.keras.Model):\n @staticmethod\n def push_block(hidden_states, res_stack):\n res_stack.append(hidden_states)\n return res_stack\n\n @staticmethod\n def pop_block(hidden_states, res_stack):\n res_hidden_states = res_stack.pop()\n hidden_states = tf.concat([hidden_states, res_hidden_states], axis=-1)\n return hidden_states, res_stack\n\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None, lora_dict=None):\n sample = tf.keras.layers.Input((img_height // 8, img_width // 8, 4))\n timestep = tf.keras.layers.Input(())\n text_emb = tf.keras.layers.Input((None, 2048))\n text_embeds = tf.keras.layers.Input((1280,))\n time_ids = tf.keras.layers.Input((6,))\n # 1. time\n t_emb = Timesteps(320, name=\"time_proj\")(timestep)\n t_emb = tf.reshape(t_emb, (-1, 320))\n t_emb = Linear(1280, name=\"time_embedding.linear_1\")(tf.cast(t_emb, sample.dtype))\n t_emb = tf.keras.layers.Activation(\"swish\")(t_emb)\n t_emb = Linear(1280, name=\"time_embedding.linear_2\")(t_emb)\n time_embeds = Timesteps(256, name=\"add_time_proj\")(time_ids)\n time_embeds = tf.reshape(time_embeds, (-1, 1536)) # 6*256 = 1536\n add_embeds = tf.concat([text_embeds, time_embeds], axis=-1)\n add_embeds = tf.cast(add_embeds, sample.dtype)\n add_embeds = Linear(1280, name=\"add_embedding.linear_1\")(add_embeds)\n add_embeds = tf.keras.layers.Activation(\"swish\")(add_embeds)\n add_embeds = Linear(1280, name=\"add_embedding.linear_2\")(add_embeds)\n time_emb = tf.keras.layers.Activation(\"swish\")(t_emb + add_embeds)\n # 2. pre-process\n hidden_states = tf.keras.layers.Conv2D(320, kernel_size=3, strides=1, name=\"conv_in\")(\n tf.keras.layers.ZeroPadding2D(1)(sample))\n res_stack = [hidden_states]\n # 3. 
blocks\n # DownBlock2D\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.0\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.1\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(320, name=\"down_blocks.0.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(640, name=\"down_blocks.1.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n # UNetMidBlock2DCrossAttn\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"mid_block.attentions.0\")((hidden_states, text_emb))\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.1\")((hidden_states, time_emb))\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(1280, name=\"up_blocks.0.upsamplers.0\")(hidden_states)\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, 
name=\"up_blocks.1.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(640, name=\"up_blocks.1.upsamplers.0\")(hidden_states)\n # UpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.2\")((hidden_states, time_emb))\n hidden_states = GroupNormalization(32, epsilon=1e-05, center=True, scale=True,\n name=\"conv_norm_out\")(\n hidden_states)\n hidden_states = tf.keras.layers.Activation(\"swish\")(hidden_states)\n output = tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, name=\"conv_out\")(\n tf.keras.layers.ZeroPadding2D(1)(hidden_states))\n super().__init__([sample, timestep, text_emb, time_ids, text_embeds], output, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"diffusion_model\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)" }, { "identifier": "ImageDecoder", "path": "stable_diffusion_xl/image_decoder.py", "snippet": "class ImageDecoder(tf.keras.Sequential):\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((img_height // 8, img_width // 8, 4)),\n tf.keras.layers.Rescaling(1.0 / 0.13025),\n tf.keras.layers.Conv2D(4, 1, strides=1),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(512, 3, strides=1),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n UpSampler(256),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(3, 3, strides=1),\n ],\n name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"decoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n 
load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)" }, { "identifier": "ImageEncoder", "path": "stable_diffusion_xl/image_encoder.py", "snippet": "class ImageEncoder(tf.keras.Sequential):\n \"\"\"ImageEncoder is the VAE Encoder for StableDiffusionXL.\"\"\"\n\n def __init__(self, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((None, None, 3)),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(128, 3, strides=1),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n DownSampler(128, padding=((0, 1), (0, 1))),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n DownSampler(256, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n DownSampler(512, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(8, 3, strides=1),\n tf.keras.layers.Conv2D(8, 1, strides=1),\n tf.keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=2, axis=-1)[0] * 0.13025),\n ])\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"encoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)" }, { "identifier": "get_weighted_text_embeddings", "path": "stable_diffusion_xl/long_prompt_weighting.py", "snippet": "def get_weighted_text_embeddings(\n tokenizer,\n text_encoder,\n prompt: Union[str, List[str]],\n max_embeddings_multiples: Optional[int] = 4,\n no_boseos_middle: Optional[bool] = False,\n skip_parsing: Optional[bool] = False,\n skip_weighting: Optional[bool] = False,\n model_max_length=77,\n pad_token_id=49407,\n text_encoder_pool=None,\n):\n r\"\"\"\n Prompts can be assigned with local weights using brackets. For example,\n prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',\n and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.\n\n Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.\n\n Args:\n tokenizer : provide access to the tokenizer\n text_encoder : provide access to the text encoder.\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n max_embeddings_multiples (`int`, *optional*, defaults to `1`):\n The max multiple length of prompt embeddings compared to the max output length of text encoder.\n no_boseos_middle (`bool`, *optional*, defaults to `False`):\n If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and\n ending token in each of the chunk in the middle.\n skip_parsing (`bool`, *optional*, defaults to `False`):\n Skip the parsing of brackets.\n skip_weighting (`bool`, *optional*, defaults to `False`):\n Skip the weighting. 
When the parsing is skipped, it is forced True.\n \"\"\"\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n if isinstance(prompt, str):\n prompt = [prompt]\n\n if not skip_parsing:\n prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)\n else:\n prompt_tokens = [\n token[1:-1]\n for token in tokenizer.encode(prompt)[:max_length]\n ]\n prompt_weights = [[1.0] * len(token) for token in prompt_tokens]\n\n # round up the longest length of tokens to a multiple of (model_max_length - 2)\n max_length = max([len(token) for token in prompt_tokens])\n\n max_embeddings_multiples = min(\n max_embeddings_multiples,\n (max_length - 1) // (model_max_length - 2) + 1,\n )\n max_embeddings_multiples = max(1, max_embeddings_multiples)\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n\n # pad the length of tokens and weights\n bos = tokenizer.start_of_text\n eos = tokenizer.end_of_text\n pad = pad_token_id\n prompt_tokens, prompt_weights = pad_tokens_and_weights(\n prompt_tokens,\n prompt_weights,\n max_length,\n bos,\n eos,\n pad,\n no_boseos_middle=no_boseos_middle,\n chunk_length=model_max_length,\n )\n prompt_tokens = np.array(prompt_tokens, dtype=np.int32)\n # get the embeddings\n if pad_token_id != 0:\n text_embeddings_pool = None\n text_embeddings = get_unweighted_text_embeddings_openai(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n )\n else:\n text_embeddings, text_embeddings_pool = get_unweighted_text_embeddings_laion(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n text_encoder_pool=text_encoder_pool,\n )\n prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)\n if (not skip_parsing) and (not skip_weighting):\n previous_mean = text_embeddings.mean(axis=(-2, -1))\n text_embeddings *= prompt_weights[:, :, None]\n text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]\n return text_embeddings, text_embeddings_pool" }, { "identifier": "Scheduler", "path": "stable_diffusion_xl/scheduler.py", "snippet": "class Scheduler(object):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n active_lcm (`bool`, defaults true):\n apply lcm or not.\n original_inference_steps (`int`, *optional*, defaults to 50):\n The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we\n will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.\n timestep_scaling (`float`, defaults to 10.0):\n The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions\n `c_skip` and `c_out`. 
Increasing this will decrease the approximation error (although the approximation\n error at the default of `10.0` is already pretty small).\n \"\"\"\n\n def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,\n original_inference_steps: int = 50, timestep_scaling: float = 10.0, active_lcm=True):\n self.active_lcm = active_lcm\n self.num_train_timesteps = num_train_timesteps\n self.original_inference_steps = original_inference_steps\n self.timestep_scaling = timestep_scaling\n # this schedule is very specific to the latent diffusion model.\n self.alphas_cumprod = np.cumprod(\n 1. - np.square(np.linspace(np.sqrt(beta_start), np.sqrt(beta_end), num_train_timesteps)), axis=0)\n self.signal_rates = np.sqrt(self.alphas_cumprod)\n self.noise_rates = np.sqrt(1. - self.alphas_cumprod)\n self.final_alpha_cumprod = 1.0\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n # setable values\n self.num_inference_steps = None\n self.timesteps = np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int32)\n self._step_index = None\n\n # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index\n def _init_step_index(self, timestep):\n index_candidates = np.nonzero(self.timesteps == timestep)\n # The sigma index that is taken for the **very** first `step`\n # is always the second index (or the last index if there is only 1)\n # This way we can ensure we don't accidentally skip a sigma in\n # case we start in the middle of the denoising schedule (e.g. for image-to-image)\n if len(index_candidates) > 1:\n step_index = index_candidates[1]\n else:\n step_index = index_candidates[0]\n self._step_index = step_index\n\n @property\n def step_index(self):\n return self._step_index\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: Optional[int] = None,\n strength: int = 1.0):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n original_inference_steps (`int`, *optional*):\n The original number of inference steps, which will be used to generate a linearly-spaced timestep\n schedule (which is different from the standard `diffusers` implementation). We will then take\n `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as\n our final timestep schedule. 
If not set, this will default to the `original_inference_steps` attribute.\n \"\"\"\n\n if num_inference_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n self.num_inference_steps = num_inference_steps\n if self.active_lcm:\n original_steps = (\n original_inference_steps if original_inference_steps is not None else self.original_inference_steps)\n\n if original_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`original_steps`: {original_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n if num_inference_steps > original_steps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:\"\n f\" {original_steps} because the final timestep schedule will be a subset of the\"\n f\" `original_inference_steps`-sized initial timestep schedule.\")\n # LCM Timesteps Setting\n # Currently, only linear spacing is supported.\n c = self.num_train_timesteps // original_steps\n # LCM Training Steps Schedule\n lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * c - 1\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]\n else:\n timesteps = np.linspace(0, 1000 - 1, num_inference_steps, dtype=np.int32)[::-1]\n self.timesteps = timesteps.copy().astype(np.int32)\n self._step_index = None\n\n def get_scalings_for_boundary_condition_discrete(self, timestep, sigma_data=0.5):\n scaled_timestep = timestep * self.timestep_scaling\n c_skip = sigma_data ** 2 / (scaled_timestep ** 2 + sigma_data ** 2)\n c_out = scaled_timestep / (scaled_timestep ** 2 + sigma_data ** 2) ** 0.5\n return c_skip, c_out\n\n def step(self, latent: np.ndarray, timestep: int, latent_prev: np.ndarray):\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n latent (`np.ndarray`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n latent_prev (`np.ndarray`):\n A current instance of a sample created by the diffusion process.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\")\n\n if self.step_index is None:\n self._init_step_index(timestep)\n # 1. get previous step value\n prev_step_index = self.step_index + 1\n if prev_step_index < len(self.timesteps):\n prev_timestep = self.timesteps[prev_step_index]\n else:\n prev_timestep = timestep\n next_signal_rates = self.signal_rates[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n next_noise_rates = self.noise_rates[prev_timestep]\n signal_rates = self.signal_rates[timestep]\n noise_rates = self.noise_rates[timestep]\n # 2. Compute the predicted original sample x_0 based on the model parameterization\n pred_x0 = (latent_prev - noise_rates * latent) / signal_rates\n # 3. 
Denoise model output using boundary conditions\n if self.active_lcm:\n # 4. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n denoised = c_out * pred_x0 + c_skip * latent_prev\n # 5. Sample and inject noise z ~ N(0, I) for MultiStep Inference\n # Noise is not used on the final timestep of the timestep schedule.\n # This also means that noise is not used for one-step sampling.\n if self.step_index != self.num_inference_steps - 1:\n noise = np.random.randn(*latent.shape).astype(np.float32)\n latent = next_signal_rates * denoised + next_noise_rates * noise\n else:\n latent = denoised\n else:\n if self.step_index != self.num_inference_steps - 1:\n latent = next_signal_rates * pred_x0 + next_noise_rates * latent\n else:\n latent = pred_x0\n # upon completion increase step index by one\n self._step_index += 1\n return latent\n\n def __len__(self):\n return self.num_train_timesteps" }, { "identifier": "TextEncoderLaion", "path": "stable_diffusion_xl/text_encoder_laion.py", "snippet": "class TextEncoderLaion(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=1280, vocab_size=49408, num_heads=20, num_layers=32, name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(out[-1])\n super().__init__([tokens, positions], [out[-2], embedded], name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n ckpt_mapping.append(('text_model.final_layer_norm.weight', 
None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n # ckpt_mapping.append(('text_projection.weight', (1, 0)))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" }, { "identifier": "TextEncoderLaionProj", "path": "stable_diffusion_xl/text_encoder_laion.py", "snippet": "class TextEncoderLaionProj(tf.keras.Model):\n def __init__(self, embed_dim=1280, name=None, ckpt_path=None, lora_dict=None):\n embedded = tf.keras.layers.Input(shape=(embed_dim,), dtype=\"float32\", name=\"embedded\")\n proje_out = tf.keras.layers.Dense(1280, name=\"text_projection\", use_bias=False)(embedded)\n super().__init__(embedded, proje_out, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_projection.weight', (1, 0))]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" }, { "identifier": "TextEncoderOpenAi", "path": "stable_diffusion_xl/text_encoder_openai.py", "snippet": "class TextEncoderOpenAi(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=768, vocab_size=49408, num_heads=12, num_layers=12, clip_skip=-2,\n final_layer_norm=False,\n name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=quick_gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = out[clip_skip]\n if final_layer_norm:\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(embedded)\n super().__init__([tokens, positions], embedded, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers + clip_skip + 1):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n 
ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n if final_layer_norm:\n ckpt_mapping.append(('text_model.final_layer_norm.weight', None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" } ]
import numpy as np
import tensorflow as tf
from PIL import Image
from scipy.ndimage import correlate1d
from .clip_tokenizer import SimpleTokenizer
from .diffusion_model import DiffusionXLModel
from .image_decoder import ImageDecoder
from .image_encoder import ImageEncoder
from .long_prompt_weighting import get_weighted_text_embeddings
from .scheduler import Scheduler
from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj
from .text_encoder_openai import TextEncoderOpenAi
12,889
seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. 
""" if self._text_encoder_laion is None: self._text_encoder_laion = TextEncoderLaion(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder2_ckpt) if self.jit_compile: self._text_encoder_laion.compile(jit_compile=True) return self._text_encoder_laion @property def text_encoder_laion_proj(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion_proj is None:
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusionXL.""" MAX_PROMPT_LENGTH = 77 class StableDiffusionXLBase: """Base class for stable diffusion xl model.""" def __init__(self, img_height=1024, img_width=1024, jit_compile=False, active_lcm=False): self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder_laion = None self._text_encoder_laion_proj = None self._text_encoder_openai = None self._diffusion_model = None self._image_decoder = None self._tokenizer = None self.jit_compile = jit_compile self.active_lcm = active_lcm self.scheduler = Scheduler(active_lcm=active_lcm) def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def image_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def inpaint( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, inpaint_mask=None, mask_blur_strength=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, inpaint_mask=inpaint_mask, mask_blur_strength=mask_blur_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, 
guidance_rescale=guidance_rescale, callback=callback) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. starting context) context_openai, _ = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_openai, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=49407) context_laion, add_text_embeds = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_laion, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=0, text_encoder_pool=self.text_encoder_laion_proj) return np.concatenate([context_openai, context_laion], axis=-1), add_text_embeds def gaussian_blur(self, image, radius=3, h_axis=1, v_axis=2): def build_filter1d(kernel_size): if kernel_size == 1: filter1d = [1] else: triangle = [[1, 1]] for i in range(1, kernel_size - 1): cur_row = [1] prev_row = triangle[i - 1] for j in range(len(prev_row) - 1): cur_row.append(prev_row[j] + prev_row[j + 1]) cur_row.append(1) triangle.append(cur_row) filter1d = triangle[-1] filter1d = np.reshape(filter1d, (kernel_size,)) return filter1d / np.sum(filter1d) weights = build_filter1d(radius) # Apply filter horizontally blurred_image = correlate1d(image, weights, axis=h_axis, output=None, mode="reflect", cval=0.0, origin=0) # Apply filter vertically blurred_image = correlate1d(blurred_image, weights, axis=v_axis, output=None, mode="reflect", cval=0.0, origin=0) return blurred_image @staticmethod def resize(image_array, new_h=None, new_w=None): h, w, c = image_array.shape if new_h == h and new_w == w: return image_array h_bounds = 0, h - 1 w_bounds = 0, w - 1 y = np.expand_dims(np.linspace(h_bounds[0], h_bounds[1], new_h), axis=-1) x = np.expand_dims(np.linspace(w_bounds[0], w_bounds[1], new_w), axis=0) # Calculate the floor and ceiling values of x and y x_floor = np.floor(x).astype(int) x_ceil = np.ceil(x).astype(int) y_floor = np.floor(y).astype(int) y_ceil = np.ceil(y).astype(int) # Clip the values to stay within the image bounds x_floor = np.clip(x_floor, w_bounds[0], w_bounds[1]) x_ceil = np.clip(x_ceil, w_bounds[0], w_bounds[1]) y_floor = np.clip(y_floor, h_bounds[0], h_bounds[1]) y_ceil = np.clip(y_ceil, h_bounds[0], h_bounds[1]) # Calculate the fractional part of x and y dx = x - x_floor dy = y - y_floor # Get the values of the four neighboring pixels dx = np.expand_dims(dx, axis=-1) dy = np.expand_dims(dy, axis=-1) q11 = image_array[y_floor, x_floor, :] q21 = image_array[y_floor, x_ceil, :] q12 = image_array[y_ceil, x_floor, :] q22 = image_array[y_ceil, x_ceil, :] # Perform bilinear interpolation top_interp = q11 * (1.0 - dx) + q21 * dx bottom_interp = q12 * (1.0 - dx) + q22 * dx interpolated = top_interp * (1.0 - dy) + bottom_interp * dy return interpolated def preprocessed_image(self, x): if type(x) is str: x = np.array(Image.open(x).convert("RGB")) else: x = np.asarray(x) image_array = self.resize(x, self.img_height, self.img_width) image_array = np.array(image_array, dtype=np.float32) / 
255.0 input_image_array = image_array[None, ..., :3] input_image_tensor = input_image_array * 2.0 - 1.0 return input_image_array, input_image_tensor def preprocessed_mask(self, x, blur_radius=5): if type(x) is str: x = np.array(Image.open(x).convert("L")) else: x = np.asarray(x) if len(x.shape) == 2: x = np.expand_dims(x, axis=-1) mask_array = self.resize(x, self.img_height, self.img_width) if mask_array.shape[-1] != 1: mask_array = np.mean(mask_array, axis=-1, keepdims=True) input_mask_array = np.array(mask_array, dtype=np.float32) / 255.0 if blur_radius is not None: input_mask_array = self.gaussian_blur(input_mask_array, radius=blur_radius, h_axis=0, v_axis=1) latent_mask_tensor = self.resize(input_mask_array, self.img_width // 8, self.img_height // 8) return np.expand_dims(input_mask_array, axis=0), np.expand_dims(latent_mask_tensor, axis=0) def rescale_noise_cfg(self, noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891). See Section 3.4 """ std_text = np.std(noise_pred_text, axis=tuple(range(1, len(noise_pred_text.shape))), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, len(noise_cfg.shape))), keepdims=True) + epsilon # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg return noise_cfg def generate_image( self, encoded_text, add_text_embeds, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, inpaint_mask=None, mask_blur_strength=None, reference_image=None, reference_image_strength=0.8, callback=None, original_size=None, crops_coords_top_left=(0, 0), guidance_rescale=0.0, target_size=None): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. 
Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL batch_size = 8 model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." ) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. - input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_initial_diffusion_noise(self, batch_size, seed): if seed is not None: try: seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. 
Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None: self._text_encoder_laion = TextEncoderLaion(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder2_ckpt) if self.jit_compile: self._text_encoder_laion.compile(jit_compile=True) return self._text_encoder_laion @property def text_encoder_laion_proj(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion_proj is None:
self._text_encoder_laion_proj = TextEncoderLaionProj(ckpt_path=self.text_encoder2_ckpt)
7
2023-10-14 18:40:16+00:00
16k
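For orientation, a minimal sketch of where this record's gold `next_line` lands: it continues the `text_encoder_laion_proj` property that the `cropped_code` field truncates inside the `StableDiffusionXL` class. Only the single assignment line is given by the record; the `jit_compile` branch and the `return` below it are an assumption, mirrored from the record's other lazily built encoders (`text_encoder_openai`, `text_encoder_laion`). The sketch assumes the imports and classes from this record's file and is not a standalone script.

```python
class StableDiffusionXL(StableDiffusionXLBase):  # class defined earlier in this record's `all_code`
    @property
    def text_encoder_laion_proj(self):
        # Lazily build the LAION text-projection head on first access.
        if self._text_encoder_laion_proj is None:
            # Gold `next_line` of this record:
            self._text_encoder_laion_proj = TextEncoderLaionProj(ckpt_path=self.text_encoder2_ckpt)
            # Assumed continuation, copied from the sibling encoder properties shown above:
            if self.jit_compile:
                self._text_encoder_laion_proj.compile(jit_compile=True)
        return self._text_encoder_laion_proj
```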
spla-tam/SplaTAM
scripts/iphone_demo.py
[ { "identifier": "relative_transformation", "path": "datasets/gradslam_datasets/geometryutils.py", "snippet": "def relative_transformation(\n trans_01: torch.Tensor, trans_02: torch.Tensor, orthogonal_rotations: bool = False\n) -> torch.Tensor:\n r\"\"\"Function that computes the relative homogenous transformation from a\n reference transformation :math:`T_1^{0} = \\begin{bmatrix} R_1 & t_1 \\\\\n \\mathbf{0} & 1 \\end{bmatrix}` to destination :math:`T_2^{0} =\n \\begin{bmatrix} R_2 & t_2 \\\\ \\mathbf{0} & 1 \\end{bmatrix}`.\n\n .. note:: Works with imperfect (non-orthogonal) rotation matrices as well.\n\n The relative transformation is computed as follows:\n\n .. math::\n\n T_1^{2} = (T_0^{1})^{-1} \\cdot T_0^{2}\n\n Arguments:\n trans_01 (torch.Tensor): reference transformation tensor of shape\n :math:`(N, 4, 4)` or :math:`(4, 4)`.\n trans_02 (torch.Tensor): destination transformation tensor of shape\n :math:`(N, 4, 4)` or :math:`(4, 4)`.\n orthogonal_rotations (bool): If True, will invert `trans_01` assuming `trans_01[:, :3, :3]` are\n orthogonal rotation matrices (more efficient). Default: False\n\n Shape:\n - Output: :math:`(N, 4, 4)` or :math:`(4, 4)`.\n\n Returns:\n torch.Tensor: the relative transformation between the transformations.\n\n Example::\n >>> trans_01 = torch.eye(4) # 4x4\n >>> trans_02 = torch.eye(4) # 4x4\n >>> trans_12 = gradslam.geometry.geometryutils.relative_transformation(trans_01, trans_02) # 4x4\n \"\"\"\n if not torch.is_tensor(trans_01):\n raise TypeError(\n \"Input trans_01 type is not a torch.Tensor. Got {}\".format(type(trans_01))\n )\n if not torch.is_tensor(trans_02):\n raise TypeError(\n \"Input trans_02 type is not a torch.Tensor. Got {}\".format(type(trans_02))\n )\n if not trans_01.dim() in (2, 3) and trans_01.shape[-2:] == (4, 4):\n raise ValueError(\n \"Input must be a of the shape Nx4x4 or 4x4.\"\n \" Got {}\".format(trans_01.shape)\n )\n if not trans_02.dim() in (2, 3) and trans_02.shape[-2:] == (4, 4):\n raise ValueError(\n \"Input must be a of the shape Nx4x4 or 4x4.\"\n \" Got {}\".format(trans_02.shape)\n )\n if not trans_01.dim() == trans_02.dim():\n raise ValueError(\n \"Input number of dims must match. Got {} and {}\".format(\n trans_01.dim(), trans_02.dim()\n )\n )\n trans_10: torch.Tensor = (\n inverse_transformation(trans_01)\n if orthogonal_rotations\n else torch.inverse(trans_01)\n )\n trans_12: torch.Tensor = compose_transformations(trans_10, trans_02)\n return trans_12" }, { "identifier": "seed_everything", "path": "utils/common_utils.py", "snippet": "def seed_everything(seed=42):\n \"\"\"\n Set the `seed` value for torch and numpy seeds. 
Also turns on\n deterministic execution for cudnn.\n \n Parameters:\n - seed: A hashable seed value\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n print(f\"Seed set to: {seed} (type: {type(seed)})\")" }, { "identifier": "save_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_params_ckpt(output_params, output_dir, time_idx):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "save_params", "path": "utils/common_utils.py", "snippet": "def save_params(output_params, output_dir):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "report_progress", "path": "utils/eval_helpers.py", "snippet": "def report_progress(params, data, i, progress_bar, iter_time_idx, sil_thres, every_i=1, qual_every_i=1, \n tracking=False, mapping=False, wandb_run=None, wandb_step=None, wandb_save_qual=False, online_time_idx=None,\n global_logging=True):\n if i % every_i == 0 or i == 1:\n if wandb_run is not None:\n if tracking:\n stage = \"Tracking\"\n elif mapping:\n stage = \"Mapping\"\n else:\n stage = \"Current Frame Optimization\"\n if not global_logging:\n stage = \"Per Iteration \" + stage\n\n if tracking:\n # Get list of gt poses\n gt_w2c_list = data['iter_gt_w2c_list']\n valid_gt_w2c_list = []\n \n # Get latest trajectory\n latest_est_w2c = data['w2c']\n latest_est_w2c_list = []\n latest_est_w2c_list.append(latest_est_w2c)\n valid_gt_w2c_list.append(gt_w2c_list[0])\n for idx in range(1, iter_time_idx+1):\n # Check if gt pose is not nan for this time step\n if torch.isnan(gt_w2c_list[idx]).sum() > 0:\n continue\n interm_cam_rot = F.normalize(params['cam_unnorm_rots'][..., idx].detach())\n interm_cam_trans = params['cam_trans'][..., idx].detach()\n intermrel_w2c = torch.eye(4).cuda().float()\n intermrel_w2c[:3, :3] = build_rotation(interm_cam_rot)\n intermrel_w2c[:3, 3] = interm_cam_trans\n latest_est_w2c = intermrel_w2c\n latest_est_w2c_list.append(latest_est_w2c)\n valid_gt_w2c_list.append(gt_w2c_list[idx])\n\n # Get latest gt pose\n gt_w2c_list = valid_gt_w2c_list\n iter_gt_w2c = gt_w2c_list[-1]\n # Get euclidean distance error between latest and gt pose\n iter_pt_error = torch.sqrt((latest_est_w2c[0,3] - iter_gt_w2c[0,3])**2 + (latest_est_w2c[1,3] - iter_gt_w2c[1,3])**2 + (latest_est_w2c[2,3] - iter_gt_w2c[2,3])**2)\n if iter_time_idx > 0:\n # Calculate relative pose error\n rel_gt_w2c = relative_transformation(gt_w2c_list[-2], gt_w2c_list[-1])\n rel_est_w2c = relative_transformation(latest_est_w2c_list[-2], latest_est_w2c_list[-1])\n rel_pt_error = torch.sqrt((rel_gt_w2c[0,3] - rel_est_w2c[0,3])**2 + (rel_gt_w2c[1,3] - rel_est_w2c[1,3])**2 + (rel_gt_w2c[2,3] - rel_est_w2c[2,3])**2)\n else:\n rel_pt_error = torch.zeros(1).float()\n \n # Calculate ATE RMSE\n ate_rmse = evaluate_ate(gt_w2c_list, latest_est_w2c_list)\n ate_rmse = np.round(ate_rmse, decimals=6)\n if 
wandb_run is not None:\n tracking_log = {f\"{stage}/Latest Pose Error\":iter_pt_error, \n f\"{stage}/Latest Relative Pose Error\":rel_pt_error,\n f\"{stage}/ATE RMSE\":ate_rmse}\n\n # Get current frame Gaussians\n transformed_pts = transform_to_frame(params, iter_time_idx, \n gaussians_grad=False,\n camera_grad=False)\n\n # Initialize Render Variables\n rendervar = transformed_params2rendervar(params, transformed_pts)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, data['w2c'], \n transformed_pts)\n depth_sil, _, _, = Renderer(raster_settings=data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n\n im, _, _, = Renderer(raster_settings=data['cam'])(**rendervar)\n if tracking:\n psnr = calc_psnr(im * presence_sil_mask, data['im'] * presence_sil_mask).mean()\n else:\n psnr = calc_psnr(im, data['im']).mean()\n\n if tracking:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n diff_depth_l1 = torch.abs((rastered_depth - data['depth']) * presence_sil_mask)\n diff_depth_l1 = diff_depth_l1 * valid_depth_mask\n depth_l1 = diff_depth_l1.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth'])) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n diff_depth_l1 = torch.abs((rastered_depth - data['depth']))\n diff_depth_l1 = diff_depth_l1 * valid_depth_mask\n depth_l1 = diff_depth_l1.sum() / valid_depth_mask.sum()\n\n if not (tracking or mapping):\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | PSNR: {psnr:.{7}} | Depth RMSE: {rmse:.{7}} | L1\": f\"{depth_l1:.{7}}\"})\n progress_bar.update(every_i)\n elif tracking:\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | Rel Pose Error: {rel_pt_error.item():.{7}} | Pose Error: {iter_pt_error.item():.{7}} | ATE RMSE\": f\"{ate_rmse.item():.{7}}\"})\n progress_bar.update(every_i)\n elif mapping:\n progress_bar.set_postfix({f\"Time-Step: {online_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | Depth RMSE: {rmse:.{7}} | L1\": f\"{depth_l1:.{7}}\"})\n progress_bar.update(every_i)\n \n if wandb_run is not None:\n wandb_log = {f\"{stage}/PSNR\": psnr,\n f\"{stage}/Depth RMSE\": rmse,\n f\"{stage}/Depth L1\": depth_l1,\n f\"{stage}/step\": wandb_step}\n if tracking:\n wandb_log = {**wandb_log, **tracking_log}\n wandb_run.log(wandb_log)\n \n if wandb_save_qual and (i % qual_every_i == 0 or i == 1):\n # Silhouette Mask\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n\n # Log plot to wandb\n if not mapping:\n fig_title = f\"Time-Step: {iter_time_idx} | Iter: {i} | Frame: {data['id']}\"\n else:\n fig_title = f\"Time-Step: {online_time_idx} | Iter: {i} | Frame: {data['id']}\"\n plot_rgbd_silhouette(data['im'], data['depth'], im, rastered_depth, presence_sil_mask, diff_depth_l1,\n psnr, depth_l1, fig_title, wandb_run=wandb_run, wandb_step=wandb_step, \n wandb_title=f\"{stage} Qual Viz\")" }, { "identifier": "keyframe_selection_overlap", "path": "utils/keyframe_selection.py", "snippet": "def keyframe_selection_overlap(gt_depth, w2c, intrinsics, keyframe_list, k, pixels=1600):\n \"\"\"\n Select overlapping keyframes to the current camera observation.\n\n Args:\n gt_depth (tensor): ground truth 
depth image of the current frame.\n w2c (tensor): world to camera matrix (4 x 4).\n keyframe_list (list): a list containing info for each keyframe.\n k (int): number of overlapping keyframes to select.\n pixels (int, optional): number of pixels to sparsely sample \n from the image of the current camera. Defaults to 1600.\n Returns:\n selected_keyframe_list (list): list of selected keyframe id.\n \"\"\"\n # Radomly Sample Pixel Indices from valid depth pixels\n width, height = gt_depth.shape[2], gt_depth.shape[1]\n valid_depth_indices = torch.where(gt_depth[0] > 0)\n valid_depth_indices = torch.stack(valid_depth_indices, dim=1)\n indices = torch.randint(valid_depth_indices.shape[0], (pixels,))\n sampled_indices = valid_depth_indices[indices]\n\n # Back Project the selected pixels to 3D Pointcloud\n pts = get_pointcloud(gt_depth, intrinsics, w2c, sampled_indices)\n\n list_keyframe = []\n for keyframeid, keyframe in enumerate(keyframe_list):\n # Get the estimated world2cam of the keyframe\n est_w2c = keyframe['est_w2c']\n # Transform the 3D pointcloud to the keyframe's camera space\n pts4 = torch.cat([pts, torch.ones_like(pts[:, :1])], dim=1)\n transformed_pts = (est_w2c @ pts4.T).T[:, :3]\n # Project the 3D pointcloud to the keyframe's image space\n points_2d = torch.matmul(intrinsics, transformed_pts.transpose(0, 1))\n points_2d = points_2d.transpose(0, 1)\n points_z = points_2d[:, 2:] + 1e-5\n points_2d = points_2d / points_z\n projected_pts = points_2d[:, :2]\n # Filter out the points that are outside the image\n edge = 20\n mask = (projected_pts[:, 0] < width-edge)*(projected_pts[:, 0] > edge) * \\\n (projected_pts[:, 1] < height-edge)*(projected_pts[:, 1] > edge)\n mask = mask & (points_z[:, 0] > 0)\n # Compute the percentage of points that are inside the image\n percent_inside = mask.sum()/projected_pts.shape[0]\n list_keyframe.append(\n {'id': keyframeid, 'percent_inside': percent_inside})\n\n # Sort the keyframes based on the percentage of points that are inside the image\n list_keyframe = sorted(\n list_keyframe, key=lambda i: i['percent_inside'], reverse=True)\n # Select the keyframes with percentage of points inside the image > 0\n selected_keyframe_list = [keyframe_dict['id']\n for keyframe_dict in list_keyframe if keyframe_dict['percent_inside'] > 0.0]\n selected_keyframe_list = list(np.random.permutation(\n np.array(selected_keyframe_list))[:k])\n\n return selected_keyframe_list" }, { "identifier": "setup_camera", "path": "utils/recon_helpers.py", "snippet": "def setup_camera(w, h, k, w2c, near=0.01, far=100):\n fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]\n w2c = torch.tensor(w2c).cuda().float()\n cam_center = torch.inverse(w2c)[:3, 3]\n w2c = w2c.unsqueeze(0).transpose(1, 2)\n opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],\n [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],\n [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],\n [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)\n full_proj = w2c.bmm(opengl_proj)\n cam = Camera(\n image_height=h,\n image_width=w,\n tanfovx=w / (2 * fx),\n tanfovy=h / (2 * fy),\n bg=torch.tensor([0, 0, 0], dtype=torch.float32, device=\"cuda\"),\n scale_modifier=1.0,\n viewmatrix=w2c,\n projmatrix=full_proj,\n sh_degree=0,\n campos=cam_center,\n prefiltered=False\n )\n return cam" }, { "identifier": "build_rotation", "path": "utils/slam_external.py", "snippet": "def build_rotation(q):\n norm = torch.sqrt(q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])\n q = q / 
norm[:, None]\n rot = torch.zeros((q.size(0), 3, 3), device='cuda')\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n rot[:, 0, 0] = 1 - 2 * (y * y + z * z)\n rot[:, 0, 1] = 2 * (x * y - r * z)\n rot[:, 0, 2] = 2 * (x * z + r * y)\n rot[:, 1, 0] = 2 * (x * y + r * z)\n rot[:, 1, 1] = 1 - 2 * (x * x + z * z)\n rot[:, 1, 2] = 2 * (y * z - r * x)\n rot[:, 2, 0] = 2 * (x * z - r * y)\n rot[:, 2, 1] = 2 * (y * z + r * x)\n rot[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return rot" }, { "identifier": "prune_gaussians", "path": "utils/slam_external.py", "snippet": "def prune_gaussians(params, variables, optimizer, iter, prune_dict):\n if iter <= prune_dict['stop_after']:\n if (iter >= prune_dict['start_after']) and (iter % prune_dict['prune_every'] == 0):\n if iter == prune_dict['stop_after']:\n remove_threshold = prune_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = prune_dict['removal_opacity_threshold']\n # Remove Gaussians with low opacity\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n # Remove Gaussians that are too big\n if iter >= prune_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n torch.cuda.empty_cache()\n \n # Reset Opacities for all Gaussians\n if iter > 0 and iter % prune_dict['reset_opacities_every'] == 0 and prune_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n \n return params, variables" }, { "identifier": "densify", "path": "utils/slam_external.py", "snippet": "def densify(params, variables, optimizer, iter, densify_dict):\n if iter <= densify_dict['stop_after']:\n variables = accumulate_mean2d_gradient(variables)\n grad_thresh = densify_dict['grad_thresh']\n if (iter >= densify_dict['start_after']) and (iter % densify_dict['densify_every'] == 0):\n grads = variables['means2D_gradient_accum'] / variables['denom']\n grads[grads.isnan()] = 0.0\n to_clone = torch.logical_and(grads >= grad_thresh, (\n torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))\n new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n padded_grad = torch.zeros(num_pts, device=\"cuda\")\n padded_grad[:grads.shape[0]] = grads\n to_split = torch.logical_and(padded_grad >= grad_thresh,\n torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[\n 'scene_radius'])\n n = densify_dict['num_to_split_into'] # number to split into\n new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n stds = torch.exp(params['log_scales'])[to_split].repeat(n, 3)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)\n new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)\n new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n 
variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\")\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\")\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\")\n to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device=\"cuda\")))\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n if iter == densify_dict['stop_after']:\n remove_threshold = densify_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = densify_dict['removal_opacity_threshold']\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n if iter >= densify_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n torch.cuda.empty_cache()\n\n # Reset Opacities for all Gaussians (This is not desired for mapping on only current frame)\n if iter > 0 and iter % densify_dict['reset_opacities_every'] == 0 and densify_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n\n return params, variables" }, { "identifier": "get_loss", "path": "scripts/splatam.py", "snippet": "def get_loss(params, curr_data, variables, iter_time_idx, loss_weights, use_sil_for_loss,\n sil_thres, use_l1,ignore_outlier_depth_loss, tracking=False, \n mapping=False, do_ba=False, plot_dir=None, visualize_tracking_loss=False, tracking_iteration=None):\n # Initialize Loss Dictionary\n losses = {}\n\n if tracking:\n # Get current frame Gaussians, where only the camera pose gets gradient\n transformed_pts = transform_to_frame(params, iter_time_idx, \n gaussians_grad=False,\n camera_grad=True)\n elif mapping:\n if do_ba:\n # Get current frame Gaussians, where both camera pose and Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=True)\n else:\n # Get current frame Gaussians, where only the Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=False)\n else:\n # Get current frame Gaussians, where only the Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=False)\n\n # Initialize Render Variables\n rendervar = transformed_params2rendervar(params, transformed_pts)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'],\n transformed_pts)\n\n # RGB Rendering\n rendervar['means2D'].retain_grad()\n im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)\n variables['means2D'] = rendervar['means2D'] # Gradient only accum from colour render for densification\n\n # Depth & Silhouette Rendering\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n depth = depth_sil[0, :, :].unsqueeze(0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n depth_sq = depth_sil[2, :, :].unsqueeze(0)\n uncertainty = depth_sq - depth**2\n uncertainty = uncertainty.detach()\n\n # Mask with valid depth values (accounts for outlier depth values)\n nan_mask = (~torch.isnan(depth)) & (~torch.isnan(uncertainty))\n if ignore_outlier_depth_loss:\n depth_error = torch.abs(curr_data['depth'] - 
depth) * (curr_data['depth'] > 0)\n mask = (depth_error < 10*depth_error.median())\n mask = mask & (curr_data['depth'] > 0)\n else:\n mask = (curr_data['depth'] > 0)\n mask = mask & nan_mask\n # Mask with presence silhouette mask (accounts for empty space)\n if tracking and use_sil_for_loss:\n mask = mask & presence_sil_mask\n\n # Depth loss\n if use_l1:\n mask = mask.detach()\n if tracking:\n losses['depth'] = torch.abs(curr_data['depth'] - depth)[mask].sum()\n else:\n losses['depth'] = torch.abs(curr_data['depth'] - depth)[mask].mean()\n \n # RGB Loss\n if tracking and (use_sil_for_loss or ignore_outlier_depth_loss):\n color_mask = torch.tile(mask, (3, 1, 1))\n color_mask = color_mask.detach()\n losses['im'] = torch.abs(curr_data['im'] - im)[color_mask].sum()\n elif tracking:\n losses['im'] = torch.abs(curr_data['im'] - im).sum()\n else:\n losses['im'] = 0.8 * l1_loss_v1(im, curr_data['im']) + 0.2 * (1.0 - calc_ssim(im, curr_data['im']))\n\n # Visualize the Diff Images\n if tracking and visualize_tracking_loss:\n fig, ax = plt.subplots(2, 4, figsize=(12, 6))\n weighted_render_im = im * color_mask\n weighted_im = curr_data['im'] * color_mask\n weighted_render_depth = depth * mask\n weighted_depth = curr_data['depth'] * mask\n diff_rgb = torch.abs(weighted_render_im - weighted_im).mean(dim=0).detach().cpu()\n diff_depth = torch.abs(weighted_render_depth - weighted_depth).mean(dim=0).detach().cpu()\n viz_img = torch.clip(weighted_im.permute(1, 2, 0).detach().cpu(), 0, 1)\n ax[0, 0].imshow(viz_img)\n ax[0, 0].set_title(\"Weighted GT RGB\")\n viz_render_img = torch.clip(weighted_render_im.permute(1, 2, 0).detach().cpu(), 0, 1)\n ax[1, 0].imshow(viz_render_img)\n ax[1, 0].set_title(\"Weighted Rendered RGB\")\n ax[0, 1].imshow(weighted_depth[0].detach().cpu(), cmap=\"jet\", vmin=0, vmax=6)\n ax[0, 1].set_title(\"Weighted GT Depth\")\n ax[1, 1].imshow(weighted_render_depth[0].detach().cpu(), cmap=\"jet\", vmin=0, vmax=6)\n ax[1, 1].set_title(\"Weighted Rendered Depth\")\n ax[0, 2].imshow(diff_rgb, cmap=\"jet\", vmin=0, vmax=0.8)\n ax[0, 2].set_title(f\"Diff RGB, Loss: {torch.round(losses['im'])}\")\n ax[1, 2].imshow(diff_depth, cmap=\"jet\", vmin=0, vmax=0.8)\n ax[1, 2].set_title(f\"Diff Depth, Loss: {torch.round(losses['depth'])}\")\n ax[0, 3].imshow(presence_sil_mask.detach().cpu(), cmap=\"gray\")\n ax[0, 3].set_title(\"Silhouette Mask\")\n ax[1, 3].imshow(mask[0].detach().cpu(), cmap=\"gray\")\n ax[1, 3].set_title(\"Loss Mask\")\n # Turn off axis\n for i in range(2):\n for j in range(4):\n ax[i, j].axis('off')\n # Set Title\n fig.suptitle(f\"Tracking Iteration: {tracking_iteration}\", fontsize=16)\n # Figure Tight Layout\n fig.tight_layout()\n os.makedirs(plot_dir, exist_ok=True)\n plt.savefig(os.path.join(plot_dir, f\"tmp.png\"), bbox_inches='tight')\n plt.close()\n plot_img = cv2.imread(os.path.join(plot_dir, f\"tmp.png\"))\n cv2.imshow('Diff Images', plot_img)\n cv2.waitKey(1)\n ## Save Tracking Loss Viz\n # save_plot_dir = os.path.join(plot_dir, f\"tracking_%04d\" % iter_time_idx)\n # os.makedirs(save_plot_dir, exist_ok=True)\n # plt.savefig(os.path.join(save_plot_dir, f\"%04d.png\" % tracking_iteration), bbox_inches='tight')\n # plt.close()\n\n weighted_losses = {k: v * loss_weights[k] for k, v in losses.items()}\n loss = sum(weighted_losses.values())\n\n seen = radius > 0\n variables['max_2D_radius'][seen] = torch.max(radius[seen], variables['max_2D_radius'][seen])\n variables['seen'] = seen\n weighted_losses['loss'] = loss\n\n return loss, variables, weighted_losses" }, { "identifier": 
"initialize_optimizer", "path": "scripts/splatam.py", "snippet": "def initialize_optimizer(params, lrs_dict, tracking):\n lrs = lrs_dict\n param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()]\n if tracking:\n return torch.optim.Adam(param_groups)\n else:\n return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15)" }, { "identifier": "initialize_params", "path": "scripts/splatam.py", "snippet": "def initialize_params(init_pt_cld, num_frames, mean3_sq_dist):\n num_pts = init_pt_cld.shape[0]\n means3D = init_pt_cld[:, :3] # [num_gaussians, 3]\n unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3]\n logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device=\"cuda\")\n params = {\n 'means3D': means3D,\n 'rgb_colors': init_pt_cld[:, 3:6],\n 'unnorm_rotations': unnorm_rots,\n 'logit_opacities': logit_opacities,\n 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], (1, 1)),\n }\n\n # Initialize a single gaussian trajectory to model the camera poses relative to the first frame\n cam_rots = np.tile([1, 0, 0, 0], (1, 1))\n cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames))\n params['cam_unnorm_rots'] = cam_rots\n params['cam_trans'] = np.zeros((1, 3, num_frames))\n\n for k, v in params.items():\n # Check if value is already a torch tensor\n if not isinstance(v, torch.Tensor):\n params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True))\n else:\n params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True))\n\n variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'timestep': torch.zeros(params['means3D'].shape[0]).cuda().float()}\n\n return params, variables" }, { "identifier": "initialize_camera_pose", "path": "scripts/splatam.py", "snippet": "def initialize_camera_pose(params, curr_time_idx, forward_prop):\n with torch.no_grad():\n if curr_time_idx > 1 and forward_prop:\n # Initialize the camera pose for the current frame based on a constant velocity model\n # Rotation\n prev_rot1 = F.normalize(params['cam_unnorm_rots'][..., curr_time_idx-1].detach())\n prev_rot2 = F.normalize(params['cam_unnorm_rots'][..., curr_time_idx-2].detach())\n new_rot = F.normalize(prev_rot1 + (prev_rot1 - prev_rot2))\n params['cam_unnorm_rots'][..., curr_time_idx] = new_rot.detach()\n # Translation\n prev_tran1 = params['cam_trans'][..., curr_time_idx-1].detach()\n prev_tran2 = params['cam_trans'][..., curr_time_idx-2].detach()\n new_tran = prev_tran1 + (prev_tran1 - prev_tran2)\n params['cam_trans'][..., curr_time_idx] = new_tran.detach()\n else:\n # Initialize the camera pose for the current frame\n params['cam_unnorm_rots'][..., curr_time_idx] = params['cam_unnorm_rots'][..., curr_time_idx-1].detach()\n params['cam_trans'][..., curr_time_idx] = params['cam_trans'][..., curr_time_idx-1].detach()\n \n return params" }, { "identifier": "get_pointcloud", "path": "scripts/splatam.py", "snippet": "def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, \n mask=None, compute_mean_sq_dist=False, mean_sq_dist_method=\"projective\"):\n width, height = color.shape[2], color.shape[1]\n CX = intrinsics[0][2]\n CY = intrinsics[1][2]\n FX = intrinsics[0][0]\n FY = intrinsics[1][1]\n\n # Compute indices of pixels\n x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), \n 
torch.arange(height).cuda().float(),\n indexing='xy')\n xx = (x_grid - CX)/FX\n yy = (y_grid - CY)/FY\n xx = xx.reshape(-1)\n yy = yy.reshape(-1)\n depth_z = depth[0].reshape(-1)\n\n # Initialize point cloud\n pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1)\n if transform_pts:\n pix_ones = torch.ones(height * width, 1).cuda().float()\n pts4 = torch.cat((pts_cam, pix_ones), dim=1)\n c2w = torch.inverse(w2c)\n pts = (c2w @ pts4.T).T[:, :3]\n else:\n pts = pts_cam\n\n # Compute mean squared distance for initializing the scale of the Gaussians\n if compute_mean_sq_dist:\n if mean_sq_dist_method == \"projective\":\n # Projective Geometry (this is fast, farther -> larger radius)\n scale_gaussian = depth_z / ((FX + FY)/2)\n mean3_sq_dist = scale_gaussian**2\n else:\n raise ValueError(f\"Unknown mean_sq_dist_method {mean_sq_dist_method}\")\n \n # Colorize point cloud\n cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C)\n point_cld = torch.cat((pts, cols), -1)\n\n # Select points based on mask\n if mask is not None:\n point_cld = point_cld[mask]\n if compute_mean_sq_dist:\n mean3_sq_dist = mean3_sq_dist[mask]\n\n if compute_mean_sq_dist:\n return point_cld, mean3_sq_dist\n else:\n return point_cld" }, { "identifier": "add_new_gaussians", "path": "scripts/splatam.py", "snippet": "def add_new_gaussians(params, variables, curr_data, sil_thres, time_idx, mean_sq_dist_method):\n # Silhouette Rendering\n transformed_pts = transform_to_frame(params, time_idx, gaussians_grad=False, camera_grad=False)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'],\n transformed_pts)\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n silhouette = depth_sil[1, :, :]\n non_presence_sil_mask = (silhouette < sil_thres)\n # Check for new foreground objects by using GT depth\n gt_depth = curr_data['depth'][0, :, :]\n render_depth = depth_sil[0, :, :]\n depth_error = torch.abs(gt_depth - render_depth) * (gt_depth > 0)\n non_presence_depth_mask = (render_depth > gt_depth) * (depth_error > 50*depth_error.median())\n # Determine non-presence mask\n non_presence_mask = non_presence_sil_mask | non_presence_depth_mask\n # Flatten mask\n non_presence_mask = non_presence_mask.reshape(-1)\n\n # Get the new frame Gaussians based on the Silhouette\n if torch.sum(non_presence_mask) > 0:\n # Get the new pointcloud in the world frame\n curr_cam_rot = torch.nn.functional.normalize(params['cam_unnorm_rots'][..., time_idx].detach())\n curr_cam_tran = params['cam_trans'][..., time_idx].detach()\n curr_w2c = torch.eye(4).cuda().float()\n curr_w2c[:3, :3] = build_rotation(curr_cam_rot)\n curr_w2c[:3, 3] = curr_cam_tran\n valid_depth_mask = (curr_data['depth'][0, :, :] > 0)\n non_presence_mask = non_presence_mask & valid_depth_mask.reshape(-1)\n new_pt_cld, mean3_sq_dist = get_pointcloud(curr_data['im'], curr_data['depth'], curr_data['intrinsics'], \n curr_w2c, mask=non_presence_mask, compute_mean_sq_dist=True,\n mean_sq_dist_method=mean_sq_dist_method)\n new_params = initialize_new_params(new_pt_cld, mean3_sq_dist)\n for k, v in new_params.items():\n params[k] = torch.nn.Parameter(torch.cat((params[k], v), dim=0).requires_grad_(True))\n num_pts = params['means3D'].shape[0]\n variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\").float()\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\").float()\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\").float()\n 
new_timestep = time_idx*torch.ones(new_pt_cld.shape[0],device=\"cuda\").float()\n variables['timestep'] = torch.cat((variables['timestep'],new_timestep),dim=0)\n\n return params, variables" } ]
import argparse
import os
import shutil
import sys
import time
import json
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import cyclonedds.idl as idl
import cyclonedds.idl.annotations as annotate
import cyclonedds.idl.types as types
from pathlib import Path
from importlib.machinery import SourceFileLoader
from tqdm import tqdm
from datasets.gradslam_datasets.geometryutils import relative_transformation
from utils.common_utils import seed_everything, save_params_ckpt, save_params
from utils.eval_helpers import report_progress
from utils.keyframe_selection import keyframe_selection_overlap
from utils.recon_helpers import setup_camera
from utils.slam_external import build_rotation, prune_gaussians, densify
from scripts.splatam import get_loss, initialize_optimizer, initialize_params, initialize_camera_pose, get_pointcloud, add_new_gaussians
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
from dataclasses import dataclass
from cyclonedds.domain import DomainParticipant, Domain
from cyclonedds.core import Qos, Policy
from cyclonedds.sub import DataReader
from cyclonedds.topic import Topic
from cyclonedds.util import duration
13,634
curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - 
tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette params, variables = add_new_gaussians(params, variables, densify_curr_data, config['mapping']['sil_thres'], time_idx, config['mean_sq_dist_method']) with torch.no_grad(): # Get the current estimated rotation & translation curr_cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach()) curr_cam_tran = params['cam_trans'][..., time_idx].detach() curr_w2c = torch.eye(4).cuda().float() curr_w2c[:3, :3] = build_rotation(curr_cam_rot) curr_w2c[:3, 3] = curr_cam_tran # Select Keyframes for Mapping num_keyframes = config['mapping_window_size']-2
""" Script to stream RGB-D data from the NeRFCapture iOS App & build a Gaussian Splat on the fly using SplaTAM. The CycloneDDS parts of this script are adapted from the Instant-NGP Repo: https://github.com/NVlabs/instant-ngp/blob/master/scripts/nerfcapture2nerf.py """ #!/usr/bin/env python3 _BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config", default="./configs/iphone/online_demo.py", type=str, help="Path to config file.") return parser.parse_args() # DDS # ================================================================================================== @dataclass @annotate.final @annotate.autoid("sequential") class SplatCaptureFrame(idl.IdlStruct, typename="SplatCaptureData.SplatCaptureFrame"): id: types.uint32 annotate.key("id") timestamp: types.float64 fl_x: types.float32 fl_y: types.float32 cx: types.float32 cy: types.float32 transform_matrix: types.array[types.float32, 16] width: types.uint32 height: types.uint32 image: types.sequence[types.uint8] has_depth: bool depth_width: types.uint32 depth_height: types.uint32 depth_scale: types.float32 depth_image: types.sequence[types.uint8] dds_config = """<?xml version="1.0" encoding="UTF-8" ?> \ <CycloneDDS xmlns="https://cdds.io/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://cdds.io/config https://raw.githubusercontent.com/eclipse-cyclonedds/cyclonedds/master/etc/cyclonedds.xsd"> \ <Domain id="any"> \ <Internal> \ <MinimumSocketReceiveBufferSize>10MB</MinimumSocketReceiveBufferSize> \ </Internal> \ <Tracing> \ <Verbosity>config</Verbosity> \ <OutputFile>stdout</OutputFile> \ </Tracing> \ </Domain> \ </CycloneDDS> \ """ # ================================================================================================== def dataset_capture_loop(reader: DataReader, save_path: Path, overwrite: bool, n_frames: int, depth_scale: float, config: dict): rgb_path = save_path.joinpath("rgb") if rgb_path.exists(): if overwrite: # Prompt user to confirm deletion if (input(f"warning! folder '{save_path}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y": sys.exit(1) shutil.rmtree(save_path) else: print(f"rgb_path {rgb_path} already exists. 
Please use overwrite=True in config if you want to overwrite.") sys.exit(1) print("Waiting for frames...") # Make directory images_dir = save_path.joinpath("rgb") manifest = { "fl_x": 0.0, "fl_y": 0.0, "cx": 0.0, "cy": 0.0, "w": 0.0, "h": 0.0, "frames": [] } total_frames = 0 # Total frames received time_idx = total_frames num_frames = n_frames # Total frames desired # Initialize list to keep track of Keyframes keyframe_list = [] keyframe_time_indices = [] # Init Variables to keep track of ARkit poses and runtimes gt_w2c_all_frames = [] tracking_iter_time_sum = 0 tracking_iter_time_count = 0 mapping_iter_time_sum = 0 mapping_iter_time_count = 0 tracking_frame_time_sum = 0 tracking_frame_time_count = 0 mapping_frame_time_sum = 0 mapping_frame_time_count = 0 P = torch.tensor( [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ] ).float() # Start DDS Loop while True: sample = reader.read_next() # Get frame from NeRFCapture if sample: print(f"{total_frames + 1}/{n_frames} frames received") if total_frames == 0: save_path.mkdir(parents=True, exist_ok=True) images_dir.mkdir(exist_ok=True) manifest["w"] = sample.width manifest["h"] = sample.height manifest["cx"] = sample.cx manifest["cy"] = sample.cy manifest["fl_x"] = sample.fl_x manifest["fl_y"] = sample.fl_y manifest["integer_depth_scale"] = float(depth_scale)/65535.0 if sample.has_depth: depth_dir = save_path.joinpath("depth") depth_dir.mkdir(exist_ok=True) # RGB image = np.asarray(sample.image, dtype=np.uint8).reshape((sample.height, sample.width, 3)) cv2.imwrite(str(images_dir.joinpath(f"{total_frames}.png")), cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. 
Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} 
tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), 
desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette params, variables = add_new_gaussians(params, variables, densify_curr_data, config['mapping']['sil_thres'], time_idx, config['mean_sq_dist_method']) with torch.no_grad(): # Get the current estimated rotation & translation curr_cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach()) curr_cam_tran = params['cam_trans'][..., time_idx].detach() curr_w2c = torch.eye(4).cuda().float() curr_w2c[:3, :3] = build_rotation(curr_cam_rot) curr_w2c[:3, 3] = curr_cam_tran # Select Keyframes for Mapping num_keyframes = config['mapping_window_size']-2
selected_keyframes = keyframe_selection_overlap(depth, curr_w2c, intrinsics, keyframe_list[:-1], num_keyframes)
5
2023-11-30 20:26:47+00:00
16k
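For readers who want to turn one of these dump rows back into a runnable next-line completion example, here is a minimal sketch in Python. It assumes the row is available as a plain dict whose keys mirror the fields laid out in this dump (a context list of entries with "identifier", "path", and "snippet", plus the import block, the cropped in-file prefix, and the gold next line); the key names and the build_prompt helper are illustrative assumptions, not an official loader API.

# Minimal sketch; the row key names below are assumptions based on the fields shown in this dump.
from typing import Dict, List


def format_context(entries: List[Dict[str, str]], max_snippets: int = 3) -> str:
    """Render retrieved cross-file snippets (identifier / path / snippet) as commented blocks."""
    blocks = []
    for entry in entries[:max_snippets]:
        header = f"# Snippet `{entry['identifier']}` from {entry['path']}"
        blocks.append(header + "\n" + entry["snippet"])
    return "\n\n".join(blocks)


def build_prompt(row: Dict) -> Dict[str, str]:
    """Assemble a next-line prediction example from one row (hypothetical key names)."""
    prompt = "\n\n".join([
        format_context(row["context"]),   # cross-file snippets like the get_loss / initialize_optimizer ones above
        row["import_statement"],          # the target file's import block
        row["cropped_code"],              # in-file prefix ending right before the line to predict
    ])
    return {"prompt": prompt, "target": row["next_line"]}

For the row that ends here, the target would be the keyframe_selection_overlap call shown in the next-line field above; a real harness would additionally truncate the assembled prompt to the model's context window, which the token counts recorded in the dump make easy to check.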
zhyever/PatchFusion
ControlNet/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ControlNet/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ControlNet/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ControlNet/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ControlNet/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ControlNet/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ControlNet/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ControlNet/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ControlNet/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ControlNet/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 
1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ControlNet/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ControlNet/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ControlNet/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ControlNet/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ControlNet/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ControlNet/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ControlNet/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ControlNet/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n 
super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ControlNet.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ControlNet.ldm.modules.ema import LitEma
from ControlNet.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ControlNet.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ControlNet.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ControlNet.ldm.models.diffusion.ddim import DDIMSampler
12,548
assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
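The cropped code above stops at the def q_sample(self, x_start, t, noise=None): header, whose standard DDPM body applies the closed-form forward-noising step x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. For reference, the following is a minimal, self-contained Python sketch of that step; the helper name extract and all tensor names are illustrative stand-ins, not code taken from the repository.

import torch

def extract(a, t, x_shape):
    # gather one scalar per sample at timestep t and reshape for broadcasting
    out = a.gather(-1, t)
    return out.reshape(t.shape[0], *((1,) * (len(x_shape) - 1)))

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(4, 3, 64, 64)          # clean images
t = torch.randint(0, 1000, (4,))        # one timestep per sample
noise = torch.randn_like(x0)

# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
x_t = (extract(alphas_cumprod.sqrt(), t, x0.shape) * x0
       + extract((1.0 - alphas_cumprod).sqrt(), t, x0.shape) * noise)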
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
2
2023-12-04 08:43:15+00:00
16k
baaivision/GeoDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
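Several of the context snippets listed above (DiagonalGaussianDistribution, normal_kl) define the reparameterised latent distribution used by the first-stage autoencoder. The short, self-contained sketch below reproduces the same sampling and KL-to-standard-normal computation with made-up tensor shapes; it illustrates the math and is not code from the repository.

import torch

params = torch.randn(2, 8, 16, 16)              # encoder output: [mean | logvar] along channels
mean, logvar = torch.chunk(params, 2, dim=1)
logvar = torch.clamp(logvar, -30.0, 20.0)
std = torch.exp(0.5 * logvar)

z = mean + std * torch.randn_like(mean)         # reparameterised sample

# KL(N(mean, var) || N(0, I)), summed over latent dimensions as in the snippet
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
print(z.shape, kl.shape)                        # (2, 4, 16, 16) and (2,)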
import itertools
from contextlib import contextmanager, nullcontext
from functools import partial

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
12,126
): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised
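The log_images fragment above builds its diffusion_row panel by stacking intermediate decodings and reshaping them with two rearrange calls before make_grid. The toy example below shows only that reshaping step, with random tensors standing in for decoded images; the sizes are arbitrary and chosen purely for illustration.

import torch
from einops import rearrange
from torchvision.utils import make_grid

n_log_steps, n_row, c, h, w = 5, 4, 3, 32, 32
diffusion_row = torch.rand(n_log_steps, n_row, c, h, w)    # n_log_step x n_row stack of images

grid = rearrange(diffusion_row, "n b c h w -> b n c h w")  # group by sample first
grid = rearrange(grid, "b n c h w -> (b n) c h w")         # flatten to a single batch
grid = make_grid(grid, nrow=n_log_steps)                   # one grid row per sample, one column per logged step

print(grid.shape)                                          # (3, H_grid, W_grid)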
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised
and not isinstance(self.first_stage_model, AutoencoderKL)
0
2023-12-01 01:59:42+00:00
16k
lucidrains/meshgpt-pytorch
meshgpt_pytorch/trainer.py
[ { "identifier": "custom_collate", "path": "meshgpt_pytorch/data.py", "snippet": "def custom_collate(data, pad_id = -1):\n is_dict = isinstance(first(data), dict)\n\n if is_dict:\n keys = first(data).keys()\n data = [d.values() for d in data]\n\n output = []\n\n for datum in zip(*data):\n if is_tensor(first(datum)):\n datum = pad_sequence(datum, batch_first = True, padding_value = pad_id)\n else:\n datum = list(datum)\n\n output.append(datum)\n\n output = tuple(output)\n\n if is_dict:\n output = dict(zip(keys, output))\n\n return output" }, { "identifier": "__version__", "path": "meshgpt_pytorch/version.py", "snippet": "" }, { "identifier": "MeshAutoencoder", "path": "meshgpt_pytorch/meshgpt_pytorch.py", "snippet": "class MeshAutoencoder(Module):\n @beartype\n def __init__(\n self,\n num_discrete_coors = 128,\n coor_continuous_range: Tuple[float, float] = (-1., 1.),\n dim_coor_embed = 64,\n num_discrete_area = 128,\n dim_area_embed = 16,\n num_discrete_normals = 128,\n dim_normal_embed = 64,\n num_discrete_angle = 128,\n dim_angle_embed = 16,\n encoder_dims_through_depth: Tuple[int, ...] = (\n 64, 128, 256, 256, 576\n ),\n init_decoder_conv_kernel = 7,\n decoder_dims_through_depth: Tuple[int, ...] = (\n 128, 128, 128, 128,\n 192, 192, 192, 192,\n 256, 256, 256, 256, 256, 256,\n 384, 384, 384\n ),\n dim_codebook = 192,\n num_quantizers = 2, # or 'D' in the paper\n codebook_size = 16384, # they use 16k, shared codebook between layers\n use_residual_lfq = True, # whether to use the latest lookup-free quantization\n rq_kwargs: dict = dict(\n quantize_dropout = True,\n quantize_dropout_cutoff_index = 1,\n quantize_dropout_multiple_of = 1,\n ),\n rvq_kwargs: dict = dict(\n kmeans_init = True,\n threshold_ema_dead_code = 2,\n ),\n rlfq_kwargs: dict = dict(\n frac_per_sample_entropy = 1.\n ),\n rvq_stochastic_sample_codes = True,\n sageconv_kwargs: dict = dict(\n normalize = True,\n project = True\n ),\n commit_loss_weight = 0.1,\n bin_smooth_blur_sigma = 0.4, # they blur the one hot discretized coordinate positions\n attn_encoder_depth = 0,\n attn_decoder_depth = 0,\n local_attn_kwargs: dict = dict(\n dim_head = 32,\n heads = 8\n ),\n local_attn_window_size = 64,\n linear_attn_kwargs: dict = dict(\n dim_head = 8,\n heads = 16\n ),\n use_linear_attn = True,\n pad_id = -1,\n flash_attn = True,\n sageconv_dropout = 0.,\n attn_dropout = 0.,\n ff_dropout = 0.,\n resnet_dropout = 0,\n checkpoint_quantizer = False\n ):\n super().__init__()\n\n # main face coordinate embedding\n\n self.num_discrete_coors = num_discrete_coors\n self.coor_continuous_range = coor_continuous_range\n\n self.discretize_face_coords = partial(discretize, num_discrete = num_discrete_coors, continuous_range = coor_continuous_range)\n self.coor_embed = nn.Embedding(num_discrete_coors, dim_coor_embed)\n\n # derived feature embedding\n\n self.discretize_angle = partial(discretize, num_discrete = num_discrete_angle, continuous_range = (0., pi))\n self.angle_embed = nn.Embedding(num_discrete_angle, dim_angle_embed)\n\n lo, hi = coor_continuous_range\n self.discretize_area = partial(discretize, num_discrete = num_discrete_area, continuous_range = (0., (hi - lo) ** 2))\n self.area_embed = nn.Embedding(num_discrete_area, dim_area_embed)\n\n self.discretize_normals = partial(discretize, num_discrete = num_discrete_normals, continuous_range = coor_continuous_range)\n self.normal_embed = nn.Embedding(num_discrete_normals, dim_normal_embed)\n\n # attention related\n\n attn_kwargs = dict(\n causal = False,\n prenorm = True,\n dropout = 
attn_dropout,\n window_size = local_attn_window_size,\n )\n\n # initial dimension\n\n init_dim = dim_coor_embed * 9 + dim_angle_embed * 3 + dim_normal_embed * 3 + dim_area_embed\n\n # project into model dimension\n\n self.project_in = nn.Linear(init_dim, dim_codebook)\n\n # initial sage conv\n\n sageconv_kwargs = {**sageconv_kwargs, 'sageconv_dropout' : sageconv_dropout}\n\n init_encoder_dim, *encoder_dims_through_depth = encoder_dims_through_depth\n curr_dim = init_encoder_dim\n\n self.init_sage_conv = SAGEConv(dim_codebook, init_encoder_dim, **sageconv_kwargs)\n\n self.init_encoder_act_and_norm = nn.Sequential(\n nn.SiLU(),\n nn.LayerNorm(init_encoder_dim)\n )\n\n self.encoders = ModuleList([])\n\n for dim_layer in encoder_dims_through_depth:\n sage_conv = SAGEConv(\n curr_dim,\n dim_layer,\n **sageconv_kwargs\n )\n\n self.encoders.append(sage_conv)\n curr_dim = dim_layer\n\n self.encoder_attn_blocks = ModuleList([])\n\n for _ in range(attn_encoder_depth):\n self.encoder_attn_blocks.append(nn.ModuleList([\n TaylorSeriesLinearAttn(curr_dim, prenorm = True, **linear_attn_kwargs) if use_linear_attn else None,\n LocalMHA(dim = curr_dim, **attn_kwargs, **local_attn_kwargs),\n nn.Sequential(RMSNorm(curr_dim), FeedForward(curr_dim, glu = True, dropout = ff_dropout))\n ]))\n\n # residual quantization\n\n self.codebook_size = codebook_size\n self.num_quantizers = num_quantizers\n\n self.project_dim_codebook = nn.Linear(curr_dim, dim_codebook * 3)\n\n if use_residual_lfq:\n self.quantizer = ResidualLFQ(\n dim = dim_codebook,\n num_quantizers = num_quantizers,\n codebook_size = codebook_size,\n commitment_loss_weight = 1.,\n **rlfq_kwargs,\n **rq_kwargs\n )\n else:\n self.quantizer = ResidualVQ(\n dim = dim_codebook,\n num_quantizers = num_quantizers,\n codebook_size = codebook_size,\n shared_codebook = True,\n commitment_weight = 1.,\n stochastic_sample_codes = rvq_stochastic_sample_codes,\n **rvq_kwargs,\n **rq_kwargs\n )\n\n self.checkpoint_quantizer = checkpoint_quantizer # whether to memory checkpoint the quantizer\n\n self.pad_id = pad_id # for variable lengthed faces, padding quantized ids will be set to this value\n\n # decoder\n\n decoder_input_dim = dim_codebook * 3\n\n self.decoder_attn_blocks = ModuleList([])\n\n for _ in range(attn_decoder_depth):\n self.decoder_attn_blocks.append(nn.ModuleList([\n TaylorSeriesLinearAttn(decoder_input_dim, prenorm = True, **linear_attn_kwargs) if use_linear_attn else None,\n LocalMHA(dim = decoder_input_dim, **attn_kwargs, **local_attn_kwargs),\n nn.Sequential(RMSNorm(decoder_input_dim), FeedForward(decoder_input_dim, glu = True, dropout = ff_dropout))\n ]))\n\n init_decoder_dim, *decoder_dims_through_depth = decoder_dims_through_depth\n curr_dim = init_decoder_dim\n\n assert is_odd(init_decoder_conv_kernel)\n\n self.init_decoder_conv = nn.Sequential(\n nn.Conv1d(dim_codebook * 3, init_decoder_dim, kernel_size = init_decoder_conv_kernel, padding = init_decoder_conv_kernel // 2),\n nn.SiLU(),\n Rearrange('b c n -> b n c'),\n nn.LayerNorm(init_decoder_dim),\n Rearrange('b n c -> b c n')\n )\n\n self.decoders = ModuleList([])\n\n for dim_layer in decoder_dims_through_depth:\n resnet_block = ResnetBlock(curr_dim, dim_layer, dropout = resnet_dropout)\n\n self.decoders.append(resnet_block)\n curr_dim = dim_layer\n\n self.to_coor_logits = nn.Sequential(\n nn.Linear(curr_dim, num_discrete_coors * 9),\n Rearrange('... (v c) -> ... 
v c', v = 9)\n )\n\n # loss related\n\n self.commit_loss_weight = commit_loss_weight\n self.bin_smooth_blur_sigma = bin_smooth_blur_sigma\n\n @beartype\n def encode(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, float],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: TensorType['b', 'e', 2, int],\n face_mask: TensorType['b', 'nf', bool],\n face_edges_mask: TensorType['b', 'e', bool],\n return_face_coordinates = False\n ):\n \"\"\"\n einops:\n b - batch\n nf - number of faces\n nv - number of vertices (3)\n c - coordinates (3)\n d - embed dim\n \"\"\"\n\n batch, num_vertices, num_coors, device = *vertices.shape, vertices.device\n _, num_faces, _ = faces.shape\n\n face_without_pad = faces.masked_fill(~rearrange(face_mask, 'b nf -> b nf 1'), 0)\n\n faces_vertices = repeat(face_without_pad, 'b nf nv -> b nf nv c', c = num_coors)\n vertices = repeat(vertices, 'b nv c -> b nf nv c', nf = num_faces)\n\n # continuous face coords\n\n face_coords = vertices.gather(-2, faces_vertices)\n\n # compute derived features and embed\n\n derived_features = get_derived_face_features(face_coords)\n\n discrete_angle = self.discretize_angle(derived_features['angles'])\n angle_embed = self.angle_embed(discrete_angle)\n\n discrete_area = self.discretize_area(derived_features['area'])\n area_embed = self.area_embed(discrete_area)\n\n discrete_normal = self.discretize_normals(derived_features['normals'])\n normal_embed = self.normal_embed(discrete_normal)\n\n # discretize vertices for face coordinate embedding\n\n discrete_face_coords = self.discretize_face_coords(face_coords)\n discrete_face_coords = rearrange(discrete_face_coords, 'b nf nv c -> b nf (nv c)') # 9 coordinates per face\n\n face_coor_embed = self.coor_embed(discrete_face_coords)\n face_coor_embed = rearrange(face_coor_embed, 'b nf c d -> b nf (c d)')\n\n # combine all features and project into model dimension\n\n face_embed, _ = pack([face_coor_embed, angle_embed, area_embed, normal_embed], 'b nf *')\n face_embed = self.project_in(face_embed)\n\n # handle variable lengths by using masked_select and masked_scatter\n\n # first handle edges\n # needs to be offset by number of faces for each batch\n\n face_index_offsets = reduce(face_mask.long(), 'b nf -> b', 'sum')\n face_index_offsets = F.pad(face_index_offsets.cumsum(dim = 0), (1, -1), value = 0)\n face_index_offsets = rearrange(face_index_offsets, 'b -> b 1 1')\n\n face_edges = face_edges + face_index_offsets\n face_edges = face_edges[face_edges_mask]\n face_edges = rearrange(face_edges, 'be ij -> ij be')\n\n # next prepare the face_mask for using masked_select and masked_scatter\n\n orig_face_embed_shape = face_embed.shape[:2]\n\n face_embed = face_embed[face_mask]\n\n # initial sage conv followed by activation and norm\n\n face_embed = self.init_sage_conv(face_embed, face_edges)\n\n face_embed = self.init_encoder_act_and_norm(face_embed)\n\n for conv in self.encoders:\n face_embed = conv(face_embed, face_edges)\n\n shape = (*orig_face_embed_shape, face_embed.shape[-1])\n\n face_embed = face_embed.new_zeros(shape).masked_scatter(rearrange(face_mask, '... -> ... 
1'), face_embed)\n\n for linear_attn, attn, ff in self.encoder_attn_blocks:\n if exists(linear_attn):\n face_embed = linear_attn(face_embed, mask = face_mask) + face_embed\n\n face_embed = attn(face_embed, mask = face_mask) + face_embed\n face_embed = ff(face_embed) + face_embed\n\n if not return_face_coordinates:\n return face_embed\n\n return face_embed, discrete_face_coords\n\n @beartype\n def quantize(\n self,\n *,\n faces: TensorType['b', 'nf', 3, int],\n face_mask: TensorType['b', 'n', bool],\n face_embed: TensorType['b', 'nf', 'd', float],\n pad_id = None,\n rvq_sample_codebook_temp = 1.\n ):\n pad_id = default(pad_id, self.pad_id)\n batch, num_faces, device = *faces.shape[:2], faces.device\n\n max_vertex_index = faces.amax()\n num_vertices = int(max_vertex_index.item() + 1)\n\n face_embed = self.project_dim_codebook(face_embed)\n face_embed = rearrange(face_embed, 'b nf (nv d) -> b nf nv d', nv = 3)\n\n vertex_dim = face_embed.shape[-1]\n vertices = torch.zeros((batch, num_vertices, vertex_dim), device = device)\n\n # create pad vertex, due to variable lengthed faces\n\n pad_vertex_id = num_vertices\n vertices = pad_at_dim(vertices, (0, 1), dim = -2, value = 0.)\n\n faces = faces.masked_fill(~rearrange(face_mask, 'b n -> b n 1'), pad_vertex_id)\n\n # prepare for scatter mean\n\n faces_with_dim = repeat(faces, 'b nf nv -> b (nf nv) d', d = vertex_dim)\n\n face_embed = rearrange(face_embed, 'b ... d -> b (...) d')\n\n # scatter mean\n\n averaged_vertices = scatter_mean(vertices, faces_with_dim, face_embed, dim = -2)\n\n # mask out null vertex token\n\n mask = torch.ones((batch, num_vertices + 1), device = device, dtype = torch.bool)\n mask[:, -1] = False\n\n # rvq specific kwargs\n\n quantize_kwargs = dict(mask = mask)\n\n if isinstance(self.quantizer, ResidualVQ):\n quantize_kwargs.update(sample_codebook_temp = rvq_sample_codebook_temp)\n\n # a quantize function that makes it memory checkpointable\n\n def quantize_wrapper_fn(inp):\n unquantized, quantize_kwargs = inp\n return self.quantizer(unquantized, **quantize_kwargs)\n\n # maybe checkpoint the quantize fn\n\n if self.checkpoint_quantizer:\n quantize_wrapper_fn = partial(checkpoint, quantize_wrapper_fn, use_reentrant = False)\n\n # residual VQ\n\n quantized, codes, commit_loss = quantize_wrapper_fn((averaged_vertices, quantize_kwargs))\n\n # gather quantized vertexes back to faces for decoding\n # now the faces have quantized vertices\n\n face_embed_output = quantized.gather(-2, faces_with_dim)\n face_embed_output = rearrange(face_embed_output, 'b (nf nv) d -> b nf (nv d)', nv = 3)\n\n # vertex codes also need to be gathered to be organized by face sequence\n # for autoregressive learning\n\n faces_with_quantized_dim = repeat(faces, 'b nf nv -> b (nf nv) q', q = self.num_quantizers)\n codes_output = codes.gather(-2, faces_with_quantized_dim)\n\n # make sure codes being outputted have this padding\n\n face_mask = repeat(face_mask, 'b nf -> b (nf nv) 1', nv = 3)\n codes_output = codes_output.masked_fill(~face_mask, self.pad_id)\n\n # output quantized, codes, as well as commitment loss\n\n return face_embed_output, codes_output, commit_loss\n\n @beartype\n def decode(\n self,\n quantized: TensorType['b', 'n', 'd', float],\n face_mask: TensorType['b', 'n', bool]\n ):\n conv_face_mask = rearrange(face_mask, 'b n -> b 1 n')\n\n x = quantized\n\n for linear_attn, attn, ff in self.decoder_attn_blocks:\n if exists(linear_attn):\n x = linear_attn(x, mask = face_mask) + x\n\n x = attn(x, mask = face_mask) + x\n x = ff(x) + x\n\n x = 
rearrange(x, 'b n d -> b d n')\n\n x = x.masked_fill(~conv_face_mask, 0.)\n x = self.init_decoder_conv(x)\n\n for resnet_block in self.decoders:\n x = resnet_block(x, mask = conv_face_mask)\n\n return rearrange(x, 'b d n -> b n d')\n\n @beartype\n @torch.no_grad()\n def decode_from_codes_to_faces(\n self,\n codes: Tensor,\n face_mask: Optional[TensorType['b', 'n', bool]] = None,\n return_discrete_codes = False\n ):\n codes = rearrange(codes, 'b ... -> b (...)')\n\n if not exists(face_mask):\n face_mask = reduce(codes != self.pad_id, 'b (nf nv q) -> b nf', 'all', nv = 3, q = self.num_quantizers)\n\n # handle different code shapes\n\n codes = rearrange(codes, 'b (n q) -> b n q', q = self.num_quantizers)\n\n # decode\n\n quantized = self.quantizer.get_output_from_indices(codes)\n quantized = rearrange(quantized, 'b (nf nv) d -> b nf (nv d)', nv = 3)\n\n decoded = self.decode(\n quantized,\n face_mask = face_mask\n )\n\n decoded = decoded.masked_fill(~face_mask[..., None], 0.)\n pred_face_coords = self.to_coor_logits(decoded)\n\n pred_face_coords = pred_face_coords.argmax(dim = -1)\n\n pred_face_coords = rearrange(pred_face_coords, '... (v c) -> ... v c', v = 3)\n\n # back to continuous space\n\n continuous_coors = undiscretize(\n pred_face_coords,\n num_discrete = self.num_discrete_coors,\n continuous_range = self.coor_continuous_range\n )\n\n # mask out with nan\n\n continuous_coors = continuous_coors.masked_fill(~rearrange(face_mask, 'b nf -> b nf 1 1'), float('nan'))\n\n if not return_discrete_codes:\n return continuous_coors, face_mask\n\n return continuous_coors, pred_face_coords, face_mask\n\n @torch.no_grad()\n def tokenize(self, vertices, faces, face_edges = None, **kwargs):\n assert 'return_codes' not in kwargs\n\n inputs = [vertices, faces, face_edges]\n inputs = [*filter(exists, inputs)]\n ndims = {i.ndim for i in inputs}\n\n assert len(ndims) == 1\n batch_less = first(list(ndims)) == 2\n\n if batch_less:\n inputs = [rearrange(i, '... -> 1 ...') for i in inputs]\n\n input_kwargs = dict(zip(['vertices', 'faces', 'face_edges'], inputs))\n\n self.eval()\n\n codes = self.forward(\n **input_kwargs,\n return_codes = True,\n **kwargs\n )\n\n if batch_less:\n codes = rearrange(codes, '1 ... 
-> ...')\n\n return codes\n\n @beartype\n def forward(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, float],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: Optional[TensorType['b', 'e', 2, int]] = None,\n return_codes = False,\n return_loss_breakdown = False,\n return_recon_faces = False,\n only_return_recon_faces = False,\n rvq_sample_codebook_temp = 1.\n ):\n if not exists(face_edges):\n face_edges = derive_face_edges_from_faces(faces, pad_id = self.pad_id)\n\n num_faces, num_face_edges, device = faces.shape[1], face_edges.shape[1], faces.device\n\n face_mask = reduce(faces != self.pad_id, 'b nf c -> b nf', 'all')\n face_edges_mask = reduce(face_edges != self.pad_id, 'b e ij -> b e', 'all')\n\n encoded, face_coordinates = self.encode(\n vertices = vertices,\n faces = faces,\n face_edges = face_edges,\n face_edges_mask = face_edges_mask,\n face_mask = face_mask,\n return_face_coordinates = True\n )\n\n quantized, codes, commit_loss = self.quantize(\n face_embed = encoded,\n faces = faces,\n face_mask = face_mask,\n rvq_sample_codebook_temp = rvq_sample_codebook_temp\n )\n\n if return_codes:\n assert not return_recon_faces, 'cannot return reconstructed faces when just returning raw codes'\n\n codes = codes.masked_fill(~repeat(face_mask, 'b nf -> b (nf 3) 1'), self.pad_id)\n return codes\n\n decode = self.decode(\n quantized,\n face_mask = face_mask\n )\n\n pred_face_coords = self.to_coor_logits(decode)\n\n # compute reconstructed faces if needed\n\n if return_recon_faces or only_return_recon_faces:\n\n recon_faces = undiscretize(\n pred_face_coords.argmax(dim = -1),\n num_discrete = self.num_discrete_coors,\n continuous_range = self.coor_continuous_range,\n )\n\n recon_faces = rearrange(recon_faces, 'b nf (nv c) -> b nf nv c', nv = 3)\n face_mask = rearrange(face_mask, 'b nf -> b nf 1 1')\n recon_faces = recon_faces.masked_fill(~face_mask, float('nan'))\n face_mask = rearrange(face_mask, 'b nf 1 1 -> b nf')\n\n if only_return_recon_faces:\n return recon_faces\n\n # prepare for recon loss\n\n pred_face_coords = rearrange(pred_face_coords, 'b ... c -> b c (...)')\n face_coordinates = rearrange(face_coordinates, 'b ... 
-> b 1 (...)')\n\n # reconstruction loss on discretized coordinates on each face\n # they also smooth (blur) the one hot positions, localized label smoothing basically\n\n with autocast(enabled = False):\n pred_log_prob = pred_face_coords.log_softmax(dim = 1)\n\n target_one_hot = torch.zeros_like(pred_log_prob).scatter(1, face_coordinates, 1.)\n\n if self.bin_smooth_blur_sigma >= 0.:\n target_one_hot = gaussian_blur_1d(target_one_hot, sigma = self.bin_smooth_blur_sigma)\n\n # cross entropy with localized smoothing\n\n recon_losses = (-target_one_hot * pred_log_prob).sum(dim = 1)\n\n face_mask = repeat(face_mask, 'b nf -> b (nf r)', r = 9)\n recon_loss = recon_losses[face_mask].mean()\n\n # calculate total loss\n\n total_loss = recon_loss + \\\n commit_loss.sum() * self.commit_loss_weight\n\n # calculate loss breakdown if needed\n\n loss_breakdown = (recon_loss, commit_loss)\n\n # some return logic\n\n if not return_loss_breakdown:\n if not return_recon_faces:\n return total_loss\n\n return recon_faces, total_loss\n\n if not return_recon_faces:\n return total_loss, loss_breakdown\n\n return recon_faces, total_loss, loss_breakdown" }, { "identifier": "MeshTransformer", "path": "meshgpt_pytorch/meshgpt_pytorch.py", "snippet": "class MeshTransformer(Module):\n @beartype\n def __init__(\n self,\n autoencoder: MeshAutoencoder,\n *,\n dim: Union[int, Tuple[int, int]] = 512,\n max_seq_len = 8192,\n flash_attn = True,\n attn_depth = 12,\n attn_dim_head = 64,\n attn_heads = 16,\n attn_kwargs: dict = dict(\n ff_glu = True,\n num_mem_kv = 4\n ),\n dropout = 0.,\n coarse_pre_gateloop_depth = 2,\n fine_pre_gateloop_depth = 2,\n gateloop_use_heinsen = False,\n fine_attn_depth = 2,\n fine_attn_dim_head = 32,\n fine_attn_heads = 8,\n pad_id = -1,\n condition_on_text = False,\n text_condition_model_types = ('t5',),\n text_condition_cond_drop_prob = 0.25\n ):\n super().__init__()\n\n dim, dim_fine = (dim, dim) if isinstance(dim, int) else dim\n\n self.autoencoder = autoencoder\n set_module_requires_grad_(autoencoder, False)\n\n self.codebook_size = autoencoder.codebook_size\n self.num_quantizers = autoencoder.num_quantizers\n\n self.sos_token = nn.Parameter(torch.randn(dim_fine))\n self.eos_token_id = self.codebook_size\n\n # they use axial positional embeddings\n\n assert divisible_by(max_seq_len, 3 * self.num_quantizers), f'max_seq_len ({max_seq_len}) must be divisible by (3 x {self.num_quantizers}) = {3 * self.num_quantizers}' # 3 vertices per face, with D codes per vertex\n\n self.token_embed = nn.Embedding(self.codebook_size + 1, dim)\n\n self.quantize_level_embed = nn.Parameter(torch.randn(self.num_quantizers, dim))\n self.vertex_embed = nn.Parameter(torch.randn(3, dim))\n\n self.abs_pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.max_seq_len = max_seq_len\n\n # text condition\n\n self.condition_on_text = condition_on_text\n self.conditioner = None\n\n cross_attn_dim_context = None\n\n if condition_on_text:\n self.conditioner = TextEmbeddingReturner(\n model_types = text_condition_model_types,\n cond_drop_prob = text_condition_cond_drop_prob\n )\n cross_attn_dim_context = self.conditioner.dim_latent\n\n # for summarizing the vertices of each face\n\n self.to_face_tokens = nn.Sequential(\n nn.Linear(self.num_quantizers * 3 * dim, dim),\n nn.LayerNorm(dim)\n )\n\n self.coarse_gateloop_block = GateLoopBlock(dim, depth = coarse_pre_gateloop_depth, use_heinsen = gateloop_use_heinsen) if coarse_pre_gateloop_depth > 0 else None\n\n # main autoregressive attention network\n # attending to a face token\n\n 
self.decoder = Decoder(\n dim = dim,\n depth = attn_depth,\n dim_head = attn_dim_head,\n heads = attn_heads,\n attn_flash = flash_attn,\n attn_dropout = dropout,\n ff_dropout = dropout,\n cross_attend = condition_on_text,\n cross_attn_dim_context = cross_attn_dim_context,\n **attn_kwargs\n )\n\n # projection from coarse to fine, if needed\n\n self.maybe_project_coarse_to_fine = nn.Linear(dim, dim_fine) if dim != dim_fine else nn.Identity()\n\n # address a weakness in attention\n\n self.fine_gateloop_block = GateLoopBlock(dim, depth = fine_pre_gateloop_depth) if fine_pre_gateloop_depth > 0 else None\n\n # decoding the vertices, 2-stage hierarchy\n\n self.fine_decoder = Decoder(\n dim = dim_fine,\n depth = fine_attn_depth,\n dim_head = attn_dim_head,\n heads = attn_heads,\n attn_flash = flash_attn,\n attn_dropout = dropout,\n ff_dropout = dropout,\n **attn_kwargs\n )\n\n # to logits\n\n self.to_logits = nn.Linear(dim_fine, self.codebook_size + 1)\n\n # padding id\n # force the autoencoder to use the same pad_id given in transformer\n\n self.pad_id = pad_id\n autoencoder.pad_id = pad_id\n\n @property\n def device(self):\n return next(self.parameters()).device\n\n @beartype\n @torch.no_grad()\n def embed_texts(self, texts: Union[str, List[str]]):\n single_text = not isinstance(texts, list)\n if single_text:\n texts = [texts]\n\n assert exists(self.conditioner)\n text_embeds = self.conditioner.embed_texts(texts).detach()\n\n if single_text:\n text_embeds = text_embeds[0]\n\n return text_embeds\n\n @eval_decorator\n @torch.no_grad()\n @beartype\n def generate(\n self,\n prompt: Optional[Tensor] = None,\n batch_size: Optional[int] = None,\n filter_logits_fn: Callable = top_k,\n filter_kwargs: dict = dict(),\n temperature = 1.,\n return_codes = False,\n texts: Optional[List[str]] = None,\n text_embeds: Optional[Tensor] = None,\n cond_scale = 1.,\n cache_kv = True,\n max_seq_len = None,\n face_coords_to_file: Optional[Callable[[Tensor], Any]] = None\n ):\n max_seq_len = default(max_seq_len, self.max_seq_len)\n\n if exists(prompt):\n assert not exists(batch_size)\n\n prompt = rearrange(prompt, 'b ... 
-> b (...)')\n assert prompt.shape[-1] <= self.max_seq_len\n\n batch_size = prompt.shape[0]\n\n if self.condition_on_text:\n assert exists(texts) ^ exists(text_embeds), '`text` or `text_embeds` must be passed in if `condition_on_text` is set to True'\n if exists(texts):\n text_embeds = self.embed_texts(texts)\n\n batch_size = default(batch_size, text_embeds.shape[0])\n\n batch_size = default(batch_size, 1)\n\n codes = default(prompt, torch.empty((batch_size, 0), dtype = torch.long, device = self.device))\n\n curr_length = codes.shape[-1]\n\n cache = (None, None)\n\n for i in tqdm(range(curr_length, max_seq_len)):\n # v1([q1] [q2] [q1] [q2] [q1] [q2]) v2([eos| q1] [q2] [q1] [q2] [q1] [q2]) -> 0 1 2 3 4 5 6 7 8 9 10 11 12 -> v1(F F F F F F) v2(T F F F F F) v3(T F F F F F)\n\n can_eos = i != 0 and divisible_by(i, self.num_quantizers * 3) # only allow for eos to be decoded at the end of each face, defined as 3 vertices with D residual VQ codes\n\n output = self.forward_on_codes(\n codes,\n text_embeds = text_embeds,\n return_loss = False,\n return_cache = cache_kv,\n append_eos = False,\n cond_scale = cond_scale,\n cfg_routed_kwargs = dict(\n cache = cache\n )\n )\n\n if cache_kv:\n logits, cache = output\n\n if cond_scale == 1.:\n cache = (cache, None)\n else:\n logits = output\n\n logits = logits[:, -1]\n\n if not can_eos:\n logits[:, -1] = -torch.finfo(logits.dtype).max\n\n filtered_logits = filter_logits_fn(logits, **filter_kwargs)\n\n if temperature == 0.:\n sample = filtered_logits.argmax(dim = -1)\n else:\n probs = F.softmax(filtered_logits / temperature, dim = -1)\n sample = torch.multinomial(probs, 1)\n\n codes, _ = pack([codes, sample], 'b *')\n\n # check for all rows to have [eos] to terminate\n\n is_eos_codes = (codes == self.eos_token_id)\n\n if is_eos_codes.any(dim = -1).all():\n break\n\n # mask out to padding anything after the first eos\n\n mask = is_eos_codes.float().cumsum(dim = -1) >= 1\n codes = codes.masked_fill(mask, self.pad_id)\n\n # remove a potential extra token from eos, if breaked early\n\n code_len = codes.shape[-1]\n round_down_code_len = code_len // self.num_quantizers * self.num_quantizers\n codes = codes[:, :round_down_code_len]\n\n # early return of raw residual quantizer codes\n\n if return_codes:\n codes = rearrange(codes, 'b (n q) -> b n q', q = self.num_quantizers)\n return codes\n\n self.autoencoder.eval()\n face_coords, face_mask = self.autoencoder.decode_from_codes_to_faces(codes)\n\n if not exists(face_coords_to_file):\n return face_coords, face_mask\n\n files = [face_coords_to_file(coords[mask]) for coords, mask in zip(face_coords, face_mask)]\n return files\n\n def forward(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, int],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: Optional[TensorType['b', 'e', 2, int]] = None,\n codes: Optional[Tensor] = None,\n cache: Optional[LayerIntermediates] = None,\n **kwargs\n ):\n if not exists(codes):\n codes = self.autoencoder.tokenize(\n vertices = vertices,\n faces = faces,\n face_edges = face_edges\n )\n\n return self.forward_on_codes(codes, cache = cache, **kwargs)\n\n @classifier_free_guidance\n def forward_on_codes(\n self,\n codes = None,\n return_loss = True,\n return_cache = False,\n append_eos = True,\n cache = None,\n texts: Optional[List[str]] = None,\n text_embeds: Optional[Tensor] = None,\n cond_drop_prob = 0.\n ):\n # handle text conditions\n\n attn_context_kwargs = dict()\n\n if self.condition_on_text:\n assert exists(texts) ^ exists(text_embeds), '`text` or `text_embeds` must be passed 
in if `condition_on_text` is set to True'\n\n if exists(texts):\n text_embeds = self.conditioner.embed_texts(texts)\n\n if exists(codes):\n assert text_embeds.shape[0] == codes.shape[0], 'batch size of texts or text embeddings is not equal to the batch size of the mesh codes'\n\n _, maybe_dropped_text_embeds = self.conditioner(\n text_embeds = text_embeds,\n cond_drop_prob = cond_drop_prob\n )\n\n attn_context_kwargs = dict(\n context = maybe_dropped_text_embeds.embed,\n context_mask = maybe_dropped_text_embeds.mask\n )\n\n # take care of codes that may be flattened\n\n if codes.ndim > 2:\n codes = rearrange(codes, 'b ... -> b (...)')\n\n # get some variable\n\n batch, seq_len, device = *codes.shape, codes.device\n\n assert seq_len <= self.max_seq_len, f'received codes of length {seq_len} but needs to be less than or equal to set max_seq_len {self.max_seq_len}'\n\n # auto append eos token\n\n if append_eos:\n assert exists(codes)\n\n code_lens = ((codes == self.pad_id).cumsum(dim = -1) == 0).sum(dim = -1)\n\n codes = F.pad(codes, (0, 1), value = 0)\n\n batch_arange = torch.arange(batch, device = device)\n\n batch_arange = rearrange(batch_arange, '... -> ... 1')\n code_lens = rearrange(code_lens, '... -> ... 1')\n\n codes[batch_arange, code_lens] = self.eos_token_id\n\n # if returning loss, save the labels for cross entropy\n\n if return_loss:\n assert seq_len > 0\n codes, labels = codes[:, :-1], codes\n\n # token embed (each residual VQ id)\n\n codes = codes.masked_fill(codes == self.pad_id, 0)\n codes = self.token_embed(codes)\n\n # codebook embed + absolute positions\n\n seq_arange = torch.arange(codes.shape[-2], device = device)\n\n codes = codes + self.abs_pos_emb(seq_arange)\n\n # embedding for quantizer level\n\n code_len = codes.shape[1]\n\n level_embed = repeat(self.quantize_level_embed, 'q d -> (r q) d', r = ceil(code_len / self.num_quantizers))\n codes = codes + level_embed[:code_len]\n\n # embedding for each vertex\n\n vertex_embed = repeat(self.vertex_embed, 'nv d -> (r nv q) d', r = ceil(code_len / (3 * self.num_quantizers)), q = self.num_quantizers)\n codes = codes + vertex_embed[:code_len]\n\n # create a token per face, by summarizing the 3 vertices\n # this is similar in design to the RQ transformer from Lee et al. 
https://arxiv.org/abs/2203.01941\n\n num_tokens_per_face = self.num_quantizers * 3\n\n curr_vertex_pos = code_len % num_tokens_per_face # the current intra-face vertex-code position id, needed for caching at the fine decoder stage\n\n code_len_is_multiple_of_face = divisible_by(code_len, num_tokens_per_face)\n\n next_multiple_code_len = ceil(code_len / num_tokens_per_face) * num_tokens_per_face\n\n codes = pad_to_length(codes, next_multiple_code_len, dim = -2)\n\n # grouped codes will be used for the second stage\n\n grouped_codes = rearrange(codes, 'b (nf n) d -> b nf n d', n = num_tokens_per_face)\n\n # create the coarse tokens for the first attention network\n\n face_codes = grouped_codes if code_len_is_multiple_of_face else grouped_codes[:, :-1]\n face_codes = rearrange(face_codes, 'b nf n d -> b nf (n d)')\n face_codes = self.to_face_tokens(face_codes)\n\n face_codes_len = face_codes.shape[-2]\n\n # cache logic\n\n (\n cached_attended_face_codes,\n coarse_cache,\n fine_cache,\n coarse_gateloop_cache,\n fine_gateloop_cache\n ) = cache if exists(cache) else ((None,) * 5)\n\n if exists(cache):\n cached_face_codes_len = cached_attended_face_codes.shape[-2]\n need_call_first_transformer = face_codes_len > cached_face_codes_len\n else:\n need_call_first_transformer = True\n\n should_cache_fine = not divisible_by(curr_vertex_pos + 1, num_tokens_per_face)\n\n # attention on face codes (coarse)\n\n if need_call_first_transformer:\n if exists(self.coarse_gateloop_block):\n face_codes, coarse_gateloop_cache = self.coarse_gateloop_block(face_codes, cache = coarse_gateloop_cache)\n\n attended_face_codes, coarse_cache = self.decoder(\n face_codes,\n cache = coarse_cache,\n return_hiddens = True,\n **attn_context_kwargs\n )\n\n attended_face_codes = safe_cat((cached_attended_face_codes, attended_face_codes), dim = -2)\n else:\n attended_face_codes = cached_attended_face_codes\n\n # maybe project from coarse to fine dimension for hierarchical transformers\n\n attended_face_codes = self.maybe_project_coarse_to_fine(attended_face_codes)\n\n # auto prepend sos token\n\n sos = repeat(self.sos_token, 'd -> b d', b = batch)\n\n attended_face_codes_with_sos, _ = pack([sos, attended_face_codes], 'b * d')\n\n grouped_codes = pad_to_length(grouped_codes, attended_face_codes_with_sos.shape[-2], dim = 1)\n fine_vertex_codes, _ = pack([attended_face_codes_with_sos, grouped_codes], 'b n * d')\n\n fine_vertex_codes = fine_vertex_codes[..., :-1, :]\n\n # gateloop layers\n\n if exists(self.fine_gateloop_block):\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b nf n d -> b (nf n) d')\n orig_length = fine_vertex_codes.shape[-2]\n fine_vertex_codes = fine_vertex_codes[:, :(code_len + 1)]\n\n fine_vertex_codes, fine_gateloop_cache = self.fine_gateloop_block(fine_vertex_codes, cache = fine_gateloop_cache)\n\n fine_vertex_codes = pad_to_length(fine_vertex_codes, orig_length, dim = -2)\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b (nf n) d -> b nf n d', n = num_tokens_per_face)\n\n # fine attention - 2nd stage\n\n if exists(cache):\n fine_vertex_codes = fine_vertex_codes[:, -1:]\n\n if exists(fine_cache):\n for attn_intermediate in fine_cache.attn_intermediates:\n ck, cv = attn_intermediate.cached_kv\n ck, cv = map(lambda t: rearrange(t, '(b nf) ... 
-> b nf ...', b = batch), (ck, cv))\n ck, cv = map(lambda t: t[:, -1, :, :curr_vertex_pos], (ck, cv))\n attn_intermediate.cached_kv = (ck, cv)\n\n one_face = fine_vertex_codes.shape[1] == 1\n\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b nf n d -> (b nf) n d')\n\n if one_face:\n fine_vertex_codes = fine_vertex_codes[:, :(curr_vertex_pos + 1)]\n\n attended_vertex_codes, fine_cache = self.fine_decoder(\n fine_vertex_codes,\n cache = fine_cache,\n return_hiddens = True\n )\n\n if not should_cache_fine:\n fine_cache = None\n\n if not one_face:\n # reconstitute original sequence\n\n embed = rearrange(attended_vertex_codes, '(b nf) n d -> b (nf n) d', b = batch)\n embed = embed[:, :(code_len + 1)]\n else:\n embed = attended_vertex_codes\n\n # logits\n\n logits = self.to_logits(embed)\n\n if not return_loss:\n if not return_cache:\n return logits\n\n next_cache = (\n attended_face_codes,\n coarse_cache,\n fine_cache,\n coarse_gateloop_cache,\n fine_gateloop_cache\n )\n\n return logits, next_cache\n\n # loss\n\n ce_loss = F.cross_entropy(\n rearrange(logits, 'b n c -> b c n'),\n labels,\n ignore_index = self.pad_id\n )\n\n return ce_loss" } ]
from pathlib import Path from functools import partial from packaging import version from contextlib import nullcontext, contextmanager from torch import nn, Tensor from torch.nn import Module from torch.utils.data import Dataset, DataLoader from torch.optim.lr_scheduler import _LRScheduler from pytorch_custom_utils import ( get_adam_optimizer, OptimizerWithWarmupSchedule, add_wandb_tracker_contextmanager ) from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs from beartype import beartype from beartype.door import is_bearable from beartype.typing import Optional, Tuple, Type, List from ema_pytorch import EMA from meshgpt_pytorch.data import custom_collate from meshgpt_pytorch.version import __version__ from meshgpt_pytorch.meshgpt_pytorch import ( MeshAutoencoder, MeshTransformer ) import torch import torch.nn.functional as F
token_num: 12,244
data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) self.should_validate = exists(val_dataset) if self.should_validate: assert len(val_dataset) > 0, 'your validation dataset is empty' self.val_every = val_every self.val_num_batches = val_num_batches self.val_dataloader = DataLoader( val_dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) if hasattr(dataset, 'data_kwargs') and exists(dataset.data_kwargs): assert is_bearable(dataset.data_kwargs, List[str]) self.data_kwargs = dataset.data_kwargs else: self.data_kwargs = data_kwargs ( self.model, self.dataloader ) = self.accelerator.prepare( self.model, self.dataloader ) self.grad_accum_every = grad_accum_every self.num_train_steps = num_train_steps self.register_buffer('step', torch.tensor(0)) self.checkpoint_every = checkpoint_every self.checkpoint_folder = Path(checkpoint_folder) self.checkpoint_folder.mkdir(exist_ok = True, parents = True) @property def ema_tokenizer(self): return self.ema_model.ema_model def tokenize(self, *args, **kwargs): return self.ema_tokenizer.tokenize(*args, **kwargs) def log(self, **data_kwargs): self.accelerator.log(data_kwargs, step = self.step.item()) @property def device(self): return self.unwrapped_model.device @property def is_main(self): return self.accelerator.is_main_process @property def unwrapped_model(self): return self.accelerator.unwrap_model(self.model) @property def is_local_main(self): return self.accelerator.is_local_main_process def wait(self): return self.accelerator.wait_for_everyone() def print(self, msg): return self.accelerator.print(msg) def save(self, path, overwrite = True): path = Path(path) assert overwrite or not path.exists() pkg = dict( model = self.unwrapped_model.state_dict(), ema_model = self.ema_model.state_dict(), optimizer = self.optimizer.state_dict(),
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) self.should_validate = exists(val_dataset) if self.should_validate: assert len(val_dataset) > 0, 'your validation dataset is empty' self.val_every = val_every self.val_num_batches = val_num_batches self.val_dataloader = DataLoader( val_dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) if hasattr(dataset, 'data_kwargs') and exists(dataset.data_kwargs): assert is_bearable(dataset.data_kwargs, List[str]) self.data_kwargs = dataset.data_kwargs else: self.data_kwargs = data_kwargs ( self.model, self.dataloader ) = self.accelerator.prepare( self.model, self.dataloader ) self.grad_accum_every = grad_accum_every self.num_train_steps = num_train_steps self.register_buffer('step', torch.tensor(0)) self.checkpoint_every = checkpoint_every self.checkpoint_folder = Path(checkpoint_folder) self.checkpoint_folder.mkdir(exist_ok = True, parents = True) @property def ema_tokenizer(self): return self.ema_model.ema_model def tokenize(self, *args, **kwargs): return self.ema_tokenizer.tokenize(*args, **kwargs) def log(self, **data_kwargs): self.accelerator.log(data_kwargs, step = self.step.item()) @property def device(self): return self.unwrapped_model.device @property def is_main(self): return self.accelerator.is_main_process @property def unwrapped_model(self): return self.accelerator.unwrap_model(self.model) @property def is_local_main(self): return self.accelerator.is_local_main_process def wait(self): return 
self.accelerator.wait_for_everyone() def print(self, msg): return self.accelerator.print(msg) def save(self, path, overwrite = True): path = Path(path) assert overwrite or not path.exists() pkg = dict( model = self.unwrapped_model.state_dict(), ema_model = self.ema_model.state_dict(), optimizer = self.optimizer.state_dict(),
next_line: version = __version__,
gold_snippet_index: 1
created_at: 2023-11-29 14:58:15+00:00
level: 16k
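To make the field layout of these records easier to follow, here is a minimal, hypothetical sketch (not part of the dataset and not an official loader) of how one record's `context`, `import_statement`, `cropped_code`, `next_line`, and `gold_snippet_index` fields could be stitched into a next-line completion prompt. The `build_prompt` and `gold_context` helpers and the toy record are illustrative assumptions that only mirror the schema shown above.

```python
# Hypothetical helpers: assemble a next-line completion prompt from one record.
# The toy record mimics the schema of the samples above: a list of
# {identifier, path, snippet} dicts in `context`, plus `import_statement`,
# `cropped_code`, `next_line`, and `gold_snippet_index`.

def build_prompt(record: dict) -> str:
    """Concatenate the retrieved context snippets, the file's imports, and the
    cropped in-file code; the model's task is to predict `next_line`."""
    context_block = "\n\n".join(
        f"# path: {c['path']}\n{c['snippet']}" for c in record["context"]
    )
    return f"{context_block}\n\n{record['import_statement']}\n\n{record['cropped_code']}"


def gold_context(record: dict) -> dict:
    """Context snippet flagged by `gold_snippet_index` (assumed to mark the
    snippet the gold next line depends on)."""
    return record["context"][record["gold_snippet_index"]]


if __name__ == "__main__":
    toy_record = {
        "context": [
            {"identifier": "helper", "path": "utils.py",
             "snippet": "def helper():\n    return 1"},
        ],
        "import_statement": "from utils import helper",
        "cropped_code": "def main():\n    value = (",
        "next_line": "helper())",
        "gold_snippet_index": 0,
    }
    print(build_prompt(toy_record))
    print("target next_line:", toy_record["next_line"])
```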
repo_name: EricGuo5513/momask-codes
file_path: train_res_transformer.py
[ { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False,\n clip_version=None, opt=None, **kargs):\n super(ResidualTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n # assert shared_codebook == True, \"Only support shared codebook right now!\"\n\n self.code_dim = code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n # self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n self.cond_drop_prob = cond_drop_prob\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers)\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim)\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding\n self.pad_id = opt.num_tokens\n\n # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim)\n\n if shared_codebook:\n token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n if share_weight:\n self.output_proj_weight = self.token_embed_weight\n self.output_proj_bias = None\n else:\n output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,)))\n # self.output_proj_bias = 0\n self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens)\n\n else:\n if share_weight:\n self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim)))\n self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_bias = None\n self.registered = False\n else:\n output_proj_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n\n self.output_proj_weight = nn.Parameter(output_proj_weight)\n self.output_proj_bias = 
nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens)))\n token_embed_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n self.token_embed_weight = nn.Parameter(token_embed_weight)\n\n self.apply(self.__init_weights)\n self.shared_codebook = shared_codebook\n self.share_weight = share_weight\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n # def\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n\n def q_schedule(self, bs, low, high):\n noise = uniform((bs,), device=self.opt.device)\n schedule = 1 - cosine_schedule(noise)\n return torch.round(schedule * (high - low)) + low\n\n def process_embed_proj_weight(self):\n if self.share_weight and (not self.shared_codebook):\n # if not self.registered:\n self.output_proj_weight = torch.cat([self.embed_proj_shared_weight, self.output_proj_weight_], dim=0)\n self.token_embed_weight = torch.cat([self.token_embed_weight_, self.embed_proj_shared_weight], dim=0)\n # self.registered = True\n\n def output_project(self, logits, qids):\n '''\n :logits: (bs, code_dim, seqlen)\n :qids: (bs)\n\n :return:\n -logits (bs, ntoken, seqlen)\n '''\n # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim)\n output_proj_weight = self.output_proj_weight[qids]\n # (num_qlayers, ntoken) -> (bs, ntoken)\n output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids]\n\n output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)\n if output_proj_bias is not None:\n output += output + output_proj_bias.unsqueeze(-1)\n return output\n\n\n\n def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):\n '''\n :param motion_codes: (b, seqlen, d)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param qids: (b), quantizer layer ids\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :return:\n -logits: (b, num_token, seqlen)\n '''\n cond = 
self.mask_cond(cond, force_mask=force_mask)\n\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(motion_codes)\n\n # (b, num_quantizer)\n q_onehot = self.encode_quant(qids).float().to(x.device)\n\n q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)\n cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)\n logits = self.output_process(output)\n return logits\n\n def forward_with_cond_scale(self,\n motion_codes,\n q_id,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n bs = motion_codes.shape[0]\n # if cond_scale == 1:\n qids = torch.full((bs,), q_id, dtype=torch.long, device=motion_codes.device)\n if force_mask:\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n logits = self.output_project(logits, qids-1)\n return logits\n\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask)\n logits = self.output_project(logits, qids-1)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n aux_logits = self.output_project(aux_logits, qids-1)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n def forward(self, all_indices, y, m_lens):\n '''\n :param all_indices: (b, n, q)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n self.process_embed_proj_weight()\n\n bs, ntokens, num_quant_layers = all_indices.shape\n device = all_indices.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)\n\n q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)\n all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)\n\n # randomly sample quantization layers to work on, [1, num_q)\n active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)\n\n # print(self.token_embed_weight.shape, all_indices.shape)\n token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)\n gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])\n # print(token_embed.shape, gather_indices.shape)\n all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)\n\n cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)\n\n active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)\n history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)\n logits = self.output_project(logits, active_q_layers-1)\n ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)\n\n return ce_loss, pred_id, acc\n\n @torch.no_grad()\n 
@eval_decorator\n def generate(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2,\n num_res_layers=-1, # If it's -1, use all.\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n num_quant_layers = self.opt.num_quantizers if num_res_layers==-1 else num_res_layers+1\n\n for i in range(1, num_quant_layers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, 
self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n\n for i in range(1, self.opt.num_quantizers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices" }, { "identifier": "ResidualTransformerTrainer", "path": "models/mask_transformer/transformer_trainer.py", "snippet": "class ResidualTransformerTrainer:\n def __init__(self, args, res_transformer, vq_model):\n self.opt = args\n self.res_transformer = res_transformer\n self.vq_model = vq_model\n self.device = args.device\n self.vq_model.eval()\n\n if args.is_train:\n self.logger = SummaryWriter(args.log_dir)\n # self.l1_criterion = torch.nn.SmoothL1Loss()\n\n\n def update_lr_warm_up(self, nb_iter, warm_up_iter, lr):\n\n current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1)\n for param_group in self.opt_res_transformer.param_groups:\n param_group[\"lr\"] = current_lr\n\n return current_lr\n\n\n def forward(self, batch_data):\n\n conds, motion, m_lens = batch_data\n motion = motion.detach().float().to(self.device)\n m_lens = m_lens.detach().long().to(self.device)\n\n # (b, n, q), (q, b, n ,d)\n code_idx, all_codes = self.vq_model.encode(motion)\n m_lens = m_lens // 4\n\n conds = conds.to(self.device).float() if torch.is_tensor(conds) else conds\n\n ce_loss, pred_ids, acc = self.res_transformer(code_idx, conds, m_lens)\n\n return ce_loss, acc\n\n def update(self, batch_data):\n loss, acc = self.forward(batch_data)\n\n self.opt_res_transformer.zero_grad()\n loss.backward()\n self.opt_res_transformer.step()\n self.scheduler.step()\n\n return loss.item(), acc\n\n def save(self, file_name, ep, total_it):\n 
res_trans_state_dict = self.res_transformer.state_dict()\n clip_weights = [e for e in res_trans_state_dict.keys() if e.startswith('clip_model.')]\n for e in clip_weights:\n del res_trans_state_dict[e]\n state = {\n 'res_transformer': res_trans_state_dict,\n 'opt_res_transformer': self.opt_res_transformer.state_dict(),\n 'scheduler':self.scheduler.state_dict(),\n 'ep': ep,\n 'total_it': total_it,\n }\n torch.save(state, file_name)\n\n def resume(self, model_dir):\n checkpoint = torch.load(model_dir, map_location=self.device)\n missing_keys, unexpected_keys = self.res_transformer.load_state_dict(checkpoint['res_transformer'], strict=False)\n assert len(unexpected_keys) == 0\n assert all([k.startswith('clip_model.') for k in missing_keys])\n\n try:\n self.opt_res_transformer.load_state_dict(checkpoint['opt_res_transformer']) # Optimizer\n\n self.scheduler.load_state_dict(checkpoint['scheduler']) # Scheduler\n except:\n print('Resume wo optimizer')\n return checkpoint['ep'], checkpoint['total_it']\n\n def train(self, train_loader, val_loader, eval_val_loader, eval_wrapper, plot_eval):\n self.res_transformer.to(self.device)\n self.vq_model.to(self.device)\n\n self.opt_res_transformer = optim.AdamW(self.res_transformer.parameters(), betas=(0.9, 0.99), lr=self.opt.lr, weight_decay=1e-5)\n self.scheduler = optim.lr_scheduler.MultiStepLR(self.opt_res_transformer,\n milestones=self.opt.milestones,\n gamma=self.opt.gamma)\n\n epoch = 0\n it = 0\n\n if self.opt.is_continue:\n model_dir = pjoin(self.opt.model_dir, 'latest.tar') # TODO\n epoch, it = self.resume(model_dir)\n print(\"Load model epoch:%d iterations:%d\"%(epoch, it))\n\n start_time = time.time()\n total_iters = self.opt.max_epoch * len(train_loader)\n print(f'Total Epochs: {self.opt.max_epoch}, Total Iters: {total_iters}')\n print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_loader), len(val_loader)))\n logs = defaultdict(def_value, OrderedDict())\n\n best_fid, best_div, best_top1, best_top2, best_top3, best_matching, writer = evaluation_res_transformer(\n self.opt.save_root, eval_val_loader, self.res_transformer, self.vq_model, self.logger, epoch,\n best_fid=100, best_div=100,\n best_top1=0, best_top2=0, best_top3=0,\n best_matching=100, eval_wrapper=eval_wrapper,\n plot_func=plot_eval, save_ckpt=False, save_anim=False\n )\n best_loss = 100\n best_acc = 0\n\n while epoch < self.opt.max_epoch:\n self.res_transformer.train()\n self.vq_model.eval()\n\n for i, batch in enumerate(train_loader):\n it += 1\n if it < self.opt.warm_up_iter:\n self.update_lr_warm_up(it, self.opt.warm_up_iter, self.opt.lr)\n\n loss, acc = self.update(batch_data=batch)\n logs['loss'] += loss\n logs[\"acc\"] += acc\n logs['lr'] += self.opt_res_transformer.param_groups[0]['lr']\n\n if it % self.opt.log_every == 0:\n mean_loss = OrderedDict()\n # self.logger.add_scalar('val_loss', val_loss, it)\n # self.l\n for tag, value in logs.items():\n self.logger.add_scalar('Train/%s'%tag, value / self.opt.log_every, it)\n mean_loss[tag] = value / self.opt.log_every\n logs = defaultdict(def_value, OrderedDict())\n print_current_loss(start_time, it, total_iters, mean_loss, epoch=epoch, inner_iter=i)\n\n if it % self.opt.save_latest == 0:\n self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)\n\n epoch += 1\n self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)\n\n print('Validation time:')\n self.vq_model.eval()\n self.res_transformer.eval()\n\n val_loss = []\n val_acc = []\n with torch.no_grad():\n for i, batch_data in enumerate(val_loader):\n 
loss, acc = self.forward(batch_data)\n val_loss.append(loss.item())\n val_acc.append(acc)\n\n print(f\"Validation loss:{np.mean(val_loss):.3f}, Accuracy:{np.mean(val_acc):.3f}\")\n\n self.logger.add_scalar('Val/loss', np.mean(val_loss), epoch)\n self.logger.add_scalar('Val/acc', np.mean(val_acc), epoch)\n\n if np.mean(val_loss) < best_loss:\n print(f\"Improved loss from {best_loss:.02f} to {np.mean(val_loss)}!!!\")\n self.save(pjoin(self.opt.model_dir, 'net_best_loss.tar'), epoch, it)\n best_loss = np.mean(val_loss)\n\n if np.mean(val_acc) > best_acc:\n print(f\"Improved acc from {best_acc:.02f} to {np.mean(val_acc)}!!!\")\n # self.save(pjoin(self.opt.model_dir, 'net_best_loss.tar'), epoch, it)\n best_acc = np.mean(val_acc)\n\n best_fid, best_div, best_top1, best_top2, best_top3, best_matching, writer = evaluation_res_transformer(\n self.opt.save_root, eval_val_loader, self.res_transformer, self.vq_model, self.logger, epoch, best_fid=best_fid,\n best_div=best_div, best_top1=best_top1, best_top2=best_top2, best_top3=best_top3,\n best_matching=best_matching, eval_wrapper=eval_wrapper,\n plot_func=plot_eval, save_ckpt=True, save_anim=(epoch%self.opt.eval_every_e==0)\n )" }, { "identifier": "RVQVAE", "path": "models/vq/model.py", "snippet": "class RVQVAE(nn.Module):\n def __init__(self,\n args,\n input_width=263,\n nb_code=1024,\n code_dim=512,\n output_emb_width=512,\n down_t=3,\n stride_t=2,\n width=512,\n depth=3,\n dilation_growth_rate=3,\n activation='relu',\n norm=None):\n\n super().__init__()\n assert output_emb_width == code_dim\n self.code_dim = code_dim\n self.num_code = nb_code\n # self.quant = args.quantizer\n self.encoder = Encoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n self.decoder = Decoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n rvqvae_config = {\n 'num_quantizers': args.num_quantizers,\n 'shared_codebook': args.shared_codebook,\n 'quantize_dropout_prob': args.quantize_dropout_prob,\n 'quantize_dropout_cutoff_index': 0,\n 'nb_code': nb_code,\n 'code_dim':code_dim, \n 'args': args,\n }\n self.quantizer = ResidualVQ(**rvqvae_config)\n\n def preprocess(self, x):\n # (bs, T, Jx3) -> (bs, Jx3, T)\n x = x.permute(0, 2, 1).float()\n return x\n\n def postprocess(self, x):\n # (bs, Jx3, T) -> (bs, T, Jx3)\n x = x.permute(0, 2, 1)\n return x\n\n def encode(self, x):\n N, T, _ = x.shape\n x_in = self.preprocess(x)\n x_encoder = self.encoder(x_in)\n # print(x_encoder.shape)\n code_idx, all_codes = self.quantizer.quantize(x_encoder, return_latent=True)\n # print(code_idx.shape)\n # code_idx = code_idx.view(N, -1)\n # (N, T, Q)\n # print()\n return code_idx, all_codes\n\n def forward(self, x):\n x_in = self.preprocess(x)\n # Encode\n x_encoder = self.encoder(x_in)\n\n ## quantization\n # x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5,\n # force_dropout_index=0) #TODO hardcode\n x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5)\n\n # print(code_idx[0, :, 1])\n ## decoder\n x_out = self.decoder(x_quantized)\n # x_out = self.postprocess(x_decoder)\n return x_out, commit_loss, perplexity\n\n def forward_decoder(self, x):\n x_d = self.quantizer.get_codes_from_indices(x)\n # x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()\n x = x_d.sum(dim=0).permute(0, 2, 1)\n\n # decoder\n x_out = self.decoder(x)\n # x_out = 
self.postprocess(x_decoder)\n return x_out" }, { "identifier": "TrainT2MOptions", "path": "options/train_option.py", "snippet": "class TrainT2MOptions(BaseOptions):\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument('--batch_size', type=int, default=64, help='Batch size')\n self.parser.add_argument('--max_epoch', type=int, default=500, help='Maximum number of epoch for training')\n # self.parser.add_argument('--max_iters', type=int, default=150_000, help='Training iterations')\n\n '''LR scheduler'''\n self.parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')\n self.parser.add_argument('--gamma', type=float, default=0.1, help='Learning rate schedule factor')\n self.parser.add_argument('--milestones', default=[50_000], nargs=\"+\", type=int,\n help=\"learning rate schedule (iterations)\")\n self.parser.add_argument('--warm_up_iter', default=2000, type=int, help='number of total iterations for warmup')\n\n '''Condition'''\n self.parser.add_argument('--cond_drop_prob', type=float, default=0.1, help='Drop ratio of condition, for classifier-free guidance')\n self.parser.add_argument(\"--seed\", default=3407, type=int, help=\"Seed\")\n\n self.parser.add_argument('--is_continue', action=\"store_true\", help='Is this trial continuing previous state?')\n self.parser.add_argument('--gumbel_sample', action=\"store_true\", help='Strategy for token sampling, True: Gumbel sampling, False: Categorical sampling')\n self.parser.add_argument('--share_weight', action=\"store_true\", help='Whether to share weight for projection/embedding, for residual transformer.')\n\n self.parser.add_argument('--log_every', type=int, default=50, help='Frequency of printing training progress, (iteration)')\n # self.parser.add_argument('--save_every_e', type=int, default=100, help='Frequency of printing training progress')\n self.parser.add_argument('--eval_every_e', type=int, default=10, help='Frequency of animating eval results, (epoch)')\n self.parser.add_argument('--save_latest', type=int, default=500, help='Frequency of saving checkpoint, (iteration)')\n\n\n self.is_train = True" }, { "identifier": "plot_3d_motion", "path": "utils/plot_script.py", "snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, figsize=(10, 10), fps=120, radius=4):\n matplotlib.use('Agg')\n\n title_sp = title.split(' ')\n if len(title_sp) > 20:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:20]), ' '.join(title_sp[20:])])\n elif len(title_sp) > 10:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:])])\n\n def init():\n ax.set_xlim3d([-radius / 2, radius / 2])\n ax.set_ylim3d([0, radius])\n ax.set_zlim3d([0, radius])\n # print(title)\n fig.suptitle(title, fontsize=20)\n ax.grid(b=False)\n\n def plot_xzPlane(minx, maxx, miny, minz, maxz):\n ## Plot a plane XZ\n verts = [\n [minx, miny, minz],\n [minx, miny, maxz],\n [maxx, miny, maxz],\n [maxx, miny, minz]\n ]\n xz_plane = Poly3DCollection([verts])\n xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5))\n ax.add_collection3d(xz_plane)\n\n # return ax\n\n # (seq_len, joints_num, 3)\n data = joints.copy().reshape(len(joints), -1, 3)\n fig = plt.figure(figsize=figsize)\n ax = p3.Axes3D(fig)\n init()\n MINS = data.min(axis=0).min(axis=0)\n MAXS = data.max(axis=0).max(axis=0)\n colors = ['red', 'blue', 'black', 'red', 'blue',\n 'darkblue', 'darkblue', 'darkblue', 'darkblue', 'darkblue',\n 'darkred', 'darkred', 'darkred', 'darkred', 'darkred']\n frame_number = data.shape[0]\n # print(data.shape)\n\n 
height_offset = MINS[1]\n data[:, :, 1] -= height_offset\n trajec = data[:, 0, [0, 2]]\n\n data[..., 0] -= data[:, 0:1, 0]\n data[..., 2] -= data[:, 0:1, 2]\n\n # print(trajec.shape)\n\n def update(index):\n # print(index)\n ax.lines = []\n ax.collections = []\n ax.view_init(elev=120, azim=-90)\n ax.dist = 7.5\n # ax =\n plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1],\n MAXS[2] - trajec[index, 1])\n # ax.scatter(data[index, :22, 0], data[index, :22, 1], data[index, :22, 2], color='black', s=3)\n\n if index > 1:\n ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 0]),\n trajec[:index, 1] - trajec[index, 1], linewidth=1.0,\n color='blue')\n # ax = plot_xzPlane(ax, MINS[0], MAXS[0], 0, MINS[2], MAXS[2])\n\n for i, (chain, color) in enumerate(zip(kinematic_tree, colors)):\n # print(color)\n if i < 5:\n linewidth = 4.0\n else:\n linewidth = 2.0\n ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth,\n color=color)\n # print(trajec[:index, 0].shape)\n\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n\n ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False)\n\n # writer = FFMpegFileWriter(fps=fps)\n ani.save(save_path, fps=fps)\n plt.close()" }, { "identifier": "recover_from_ric", "path": "utils/motion_process.py", "snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions" }, { "identifier": "get_opt", "path": "utils/get_opt.py", "snippet": "def get_opt(opt_path, device, **kwargs):\n opt = Namespace()\n opt_dict = vars(opt)\n\n skip = ('-------------- End ----------------',\n '------------ Options -------------',\n '\\n')\n print('Reading', opt_path)\n with open(opt_path, 'r') as f:\n for line in f:\n if line.strip() not in skip:\n # print(line.strip())\n key, value = line.strip('\\n').split(': ')\n if value in ('True', 'False'):\n opt_dict[key] = (value == 'True')\n # print(key, value)\n elif is_float(value):\n opt_dict[key] = float(value)\n elif is_number(value):\n opt_dict[key] = int(value)\n else:\n opt_dict[key] = str(value)\n\n # print(opt)\n opt_dict['which_epoch'] = 'finest'\n opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)\n opt.model_dir = pjoin(opt.save_root, 'model')\n opt.meta_dir = pjoin(opt.save_root, 'meta')\n\n if opt.dataset_name == 't2m':\n opt.data_root = './dataset/HumanML3D/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 22\n opt.dim_pose = 263\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n elif opt.dataset_name == 'kit':\n opt.data_root = './dataset/KIT-ML/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 21\n opt.dim_pose = 251\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n else:\n raise 
KeyError('Dataset not recognized')\n if not hasattr(opt, 'unit_length'):\n opt.unit_length = 4\n opt.dim_word = 300\n opt.num_classes = 200 // opt.unit_length\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.is_train = False\n opt.is_continue = False\n opt.device = device\n\n opt_dict.update(kwargs) # Overwrite with kwargs params\n\n return opt" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "t2m_kinematic_chain", "path": "utils/paramUtil.py", "snippet": "" }, { "identifier": "Text2MotionDataset", "path": "data/t2m_dataset.py", "snippet": "class Text2MotionDataset(data.Dataset):\n def __init__(self, opt, mean, std, split_file):\n self.opt = opt\n self.max_length = 20\n self.pointer = 0\n self.max_motion_length = opt.max_motion_length\n min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24\n\n data_dict = {}\n id_list = []\n with cs.open(split_file, 'r') as f:\n for line in f.readlines():\n id_list.append(line.strip())\n # id_list = id_list[:250]\n\n new_name_list = []\n length_list = []\n for name in tqdm(id_list):\n try:\n motion = np.load(pjoin(opt.motion_dir, name + '.npy'))\n if (len(motion)) < min_motion_len or (len(motion) >= 200):\n continue\n text_data = []\n flag = False\n with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:\n for line in f.readlines():\n text_dict = {}\n line_split = line.strip().split('#')\n # print(line)\n caption = line_split[0]\n tokens = line_split[1].split(' ')\n f_tag = float(line_split[2])\n to_tag = float(line_split[3])\n f_tag = 0.0 if np.isnan(f_tag) else f_tag\n to_tag = 0.0 if np.isnan(to_tag) else to_tag\n\n text_dict['caption'] = caption\n text_dict['tokens'] = tokens\n if f_tag == 0.0 and to_tag == 0.0:\n flag = True\n text_data.append(text_dict)\n else:\n try:\n n_motion = motion[int(f_tag*20) : int(to_tag*20)]\n if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200):\n continue\n new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name\n while new_name in data_dict:\n new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name\n data_dict[new_name] = {'motion': n_motion,\n 'length': len(n_motion),\n 'text':[text_dict]}\n new_name_list.append(new_name)\n length_list.append(len(n_motion))\n except:\n print(line_split)\n print(line_split[2], line_split[3], f_tag, to_tag, name)\n # break\n\n if flag:\n data_dict[name] = {'motion': motion,\n 'length': len(motion),\n 'text': text_data}\n new_name_list.append(name)\n length_list.append(len(motion))\n except Exception as e:\n # print(e)\n pass\n\n # name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1]))\n name_list, length_list = new_name_list, length_list\n\n self.mean = mean\n self.std = std\n self.length_arr = np.array(length_list)\n self.data_dict = data_dict\n self.name_list = name_list\n\n def inv_transform(self, data):\n return data * self.std + self.mean\n\n def __len__(self):\n return len(self.data_dict) - self.pointer\n\n def __getitem__(self, item):\n idx = self.pointer + item\n data = self.data_dict[self.name_list[idx]]\n motion, m_length, text_list = data['motion'], data['length'], data['text']\n # Randomly select a caption\n text_data = random.choice(text_list)\n caption, tokens = text_data['caption'], text_data['tokens']\n\n if self.opt.unit_length < 10:\n coin2 = np.random.choice(['single', 'single', 'double'])\n else:\n coin2 = 'single'\n\n if coin2 == 
'double':\n m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length\n elif coin2 == 'single':\n m_length = (m_length // self.opt.unit_length) * self.opt.unit_length\n idx = random.randint(0, len(motion) - m_length)\n motion = motion[idx:idx+m_length]\n\n \"Z Normalization\"\n motion = (motion - self.mean) / self.std\n\n if m_length < self.max_motion_length:\n motion = np.concatenate([motion,\n np.zeros((self.max_motion_length - m_length, motion.shape[1]))\n ], axis=0)\n # print(word_embeddings.shape, motion.shape)\n # print(tokens)\n return caption, motion, m_length\n\n def reset_min_len(self, length):\n assert length <= self.max_motion_length\n self.pointer = np.searchsorted(self.length_arr, length)\n print(\"Pointer Pointing at %d\" % self.pointer)" }, { "identifier": "get_dataset_motion_loader", "path": "motion_loaders/dataset_motion_loader.py", "snippet": "def get_dataset_motion_loader(opt_path, batch_size, fname, device):\n opt = get_opt(opt_path, device)\n\n # Configurations of T2M dataset and KIT dataset is almost the same\n if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':\n print('Loading dataset %s ...' % opt.dataset_name)\n\n mean = np.load(pjoin(opt.meta_dir, 'mean.npy'))\n std = np.load(pjoin(opt.meta_dir, 'std.npy'))\n\n w_vectorizer = WordVectorizer('./glove', 'our_vab')\n split_file = pjoin(opt.data_root, '%s.txt'%fname)\n dataset = Text2MotionDatasetEval(opt, mean, std, split_file, w_vectorizer)\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True,\n collate_fn=collate_fn, shuffle=True)\n else:\n raise KeyError('Dataset not Recognized !!')\n\n print('Ground Truth Dataset Loading Completed!!!')\n return dataloader, dataset" }, { "identifier": "EvaluatorModelWrapper", "path": "models/t2m_eval_wrapper.py", "snippet": "class EvaluatorModelWrapper(object):\n\n def __init__(self, opt):\n\n if opt.dataset_name == 't2m':\n opt.dim_pose = 263\n elif opt.dataset_name == 'kit':\n opt.dim_pose = 251\n else:\n raise KeyError('Dataset not Recognized!!!')\n\n opt.dim_word = 300\n opt.max_motion_length = 196\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.dim_motion_hidden = 1024\n opt.max_text_len = 20\n opt.dim_text_hidden = 512\n opt.dim_coemb_hidden = 512\n\n # print(opt)\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)\n self.opt = opt\n self.device = opt.device\n\n self.text_encoder.to(opt.device)\n self.motion_encoder.to(opt.device)\n self.movement_encoder.to(opt.device)\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not follow the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not follow the order of inputs\n def get_motion_embeddings(self, 
motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" } ]
import os
import torch
import numpy as np
from torch.utils.data import DataLoader
from os.path import join as pjoin
from models.mask_transformer.transformer import ResidualTransformer
from models.mask_transformer.transformer_trainer import ResidualTransformerTrainer
from models.vq.model import RVQVAE
from options.train_option import TrainT2MOptions
from utils.plot_script import plot_3d_motion
from utils.motion_process import recover_from_ric
from utils.get_opt import get_opt
from utils.fixseed import fixseed
from utils.paramUtil import t2m_kinematic_chain, kit_kinematic_chain
from data.t2m_dataset import Text2MotionDataset
from motion_loaders.dataset_motion_loader import get_dataset_motion_loader
from models.t2m_eval_wrapper import EvaluatorModelWrapper
12,538
def plot_t2m(data, save_dir, captions, m_lengths):
    data = train_dataset.inv_transform(data)
    # print(ep_curves.shape)
    for i, (caption, joint_data) in enumerate(zip(captions, data)):
        joint_data = joint_data[:m_lengths[i]]
def plot_t2m(data, save_dir, captions, m_lengths):
    data = train_dataset.inv_transform(data)
    # print(ep_curves.shape)
    for i, (caption, joint_data) in enumerate(zip(captions, data)):
        joint_data = joint_data[:m_lengths[i]]
joint = recover_from_ric(torch.from_numpy(joint_data).float(), opt.joints_num).numpy()
5
2023-11-29 19:21:27+00:00
16k
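A record like the one above carries everything needed to replay a next-line completion task: cross-file context snippets, the file's import_statement, a cropped_code prefix, and the gold next_line. The sketch below is a rough illustration of how such a record could be turned into a prompt and scored; the field names come from the columns shown in this dump, while the prompt layout and the exact-match metric are assumptions of mine, not something the dump specifies.

# Minimal sketch (assumptions noted above): build a completion prompt from one
# record of this dump and score a model's prediction against the gold next_line.
def build_prompt(record: dict, max_context_snippets: int = 3) -> str:
    # Each context entry carries an identifier, a path, and a code snippet.
    context_parts = [
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in record["context"][:max_context_snippets]
    ]
    return "\n\n".join(context_parts + [record["import_statement"], record["cropped_code"]])

def exact_match(prediction: str, record: dict) -> bool:
    # Compare only the first generated line with the gold continuation.
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == record["next_line"].strip()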
dvlab-research/LLMGA
llmga/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
[ { "identifier": "TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS", "path": "llmga/diffusers/tests/pipelines/pipeline_params.py", "snippet": "TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset([\"prompt\", \"image\", \"negative_prompt\"])" }, { "identifier": "TEXT_GUIDED_IMAGE_VARIATION_PARAMS", "path": "llmga/diffusers/tests/pipelines/pipeline_params.py", "snippet": "TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(\n [\n \"prompt\",\n \"image\",\n \"height\",\n \"width\",\n \"guidance_scale\",\n \"negative_prompt\",\n \"prompt_embeds\",\n \"negative_prompt_embeds\",\n ]\n)" }, { "identifier": "TEXT_TO_IMAGE_IMAGE_PARAMS", "path": "llmga/diffusers/tests/pipelines/pipeline_params.py", "snippet": "TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])" }, { "identifier": "PipelineLatentTesterMixin", "path": "llmga/diffusers/tests/pipelines/test_pipelines_common.py", "snippet": "class PipelineLatentTesterMixin:\n \"\"\"\n This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes.\n It provides a set of common tests for PyTorch pipeline that has vae, e.g.\n equivalence of different input and output types, etc.\n \"\"\"\n\n @property\n def image_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_params` in the child test class. \"\n \"`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results\"\n )\n\n @property\n def image_latents_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_latents_params` in the child test class. \"\n \"`image_latents_params` are tested for if passing latents directly are producing same results\"\n )\n\n def get_dummy_inputs_by_type(self, device, seed=0, input_image_type=\"pt\", output_type=\"np\"):\n inputs = self.get_dummy_inputs(device, seed)\n\n def convert_to_pt(image):\n if isinstance(image, torch.Tensor):\n input_image = image\n elif isinstance(image, np.ndarray):\n input_image = VaeImageProcessor.numpy_to_pt(image)\n elif isinstance(image, PIL.Image.Image):\n input_image = VaeImageProcessor.pil_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pt(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {type(image)}\")\n return input_image\n\n def convert_pt_to_type(image, input_image_type):\n if input_image_type == \"pt\":\n input_image = image\n elif input_image_type == \"np\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n elif input_image_type == \"pil\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pil(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {input_image_type}.\")\n return input_image\n\n for image_param in self.image_params:\n if image_param in inputs.keys():\n inputs[image_param] = convert_pt_to_type(\n convert_to_pt(inputs[image_param]).to(device), input_image_type\n )\n\n inputs[\"output_type\"] = output_type\n\n return inputs\n\n def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4):\n self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff)\n\n def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type=\"pt\"):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n output_pt = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pt\")\n )[0]\n output_np = 
pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"np\")\n )[0]\n output_pil = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pil\")\n )[0]\n\n max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max()\n self.assertLess(\n max_diff, expected_max_diff, \"`output_type=='pt'` generate different results from `output_type=='np'`\"\n )\n\n max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max()\n self.assertLess(max_diff, 2.0, \"`output_type=='pil'` generate different results from `output_type=='np'`\")\n\n def test_pt_np_pil_inputs_equivalent(self):\n if len(self.image_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"np\"))[0]\n out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pil\"))[0]\n\n max_diff = np.abs(out_input_pt - out_input_np).max()\n self.assertLess(max_diff, 1e-4, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n max_diff = np.abs(out_input_pil - out_input_np).max()\n self.assertLess(max_diff, 1e-2, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n\n def test_latents_input(self):\n if len(self.image_latents_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n\n vae = components[\"vae\"]\n inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\")\n generator = inputs[\"generator\"]\n for image_param in self.image_latents_params:\n if image_param in inputs.keys():\n inputs[image_param] = (\n vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor\n )\n out_latents_inputs = pipe(**inputs)[0]\n\n max_diff = np.abs(out - out_latents_inputs).max()\n self.assertLess(max_diff, 1e-4, \"passing latents as image input generate different result from passing image\")" }, { "identifier": "PipelineTesterMixin", "path": "llmga/diffusers/tests/pipelines/test_pipelines_common.py", "snippet": "class PipelineTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline,\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n # Canonical parameters that are passed to `__call__` regardless\n # of the type of pipeline. 
They are always optional and have common\n # sense default values.\n required_optional_params = frozenset(\n [\n \"num_inference_steps\",\n \"num_images_per_prompt\",\n \"generator\",\n \"latents\",\n \"output_type\",\n \"return_dict\",\n \"callback\",\n \"callback_steps\",\n ]\n )\n\n # set these parameters to False in the child class if the pipeline does not support the corresponding functionality\n test_attention_slicing = True\n\n test_xformers_attention = True\n\n def get_generator(self, seed):\n device = torch_device if torch_device != \"mps\" else \"cpu\"\n generator = torch.Generator(device).manual_seed(seed)\n return generator\n\n @property\n def pipeline_class(self) -> Union[Callable, DiffusionPipeline]:\n raise NotImplementedError(\n \"You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_components(self):\n raise NotImplementedError(\n \"You need to implement `get_dummy_components(self)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_inputs(self, device, seed=0):\n raise NotImplementedError(\n \"You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `params` in the child test class. \"\n \"`params` are checked for if all values are present in `__call__`'s signature.\"\n \" You can set `params` using one of the common set of parameters defined in `pipeline_params.py`\"\n \" e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to \"\n \"image pipelines, including prompts and prompt embedding overrides.\"\n \"If your pipeline's set of arguments has minor changes from one of the common sets of arguments, \"\n \"do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline \"\n \"with non-configurable height and width arguments should set the attribute as \"\n \"`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def batch_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `batch_params` in the child test class. \"\n \"`batch_params` are the parameters required to be batched when passed to the pipeline's \"\n \"`__call__` method. `pipeline_params.py` provides some common sets of parameters such as \"\n \"`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's \"\n \"set of batch arguments has minor changes from one of the common sets of batch arguments, \"\n \"do not make modifications to the existing common sets of batch arguments. I.e. a text to \"\n \"image pipeline `negative_prompt` is not batched should set the attribute as \"\n \"`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. 
\"\n \"See existing pipeline tests for reference.\"\n )\n\n def tearDown(self):\n # clean up the VRAM after each test in case of CUDA runtime errors\n super().tearDown()\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_save_load_local(self, expected_max_difference=5e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n logger = logging.get_logger(\"diffusers.pipelines.pipeline_utils\")\n logger.setLevel(diffusers.logging.INFO)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n\n with CaptureLogger(logger) as cap_logger:\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n\n for name in pipe_loaded.components.keys():\n if name not in pipe_loaded._optional_components:\n assert name in str(cap_logger)\n\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_pipeline_call_signature(self):\n self.assertTrue(\n hasattr(self.pipeline_class, \"__call__\"), f\"{self.pipeline_class} should have a `__call__` method\"\n )\n\n parameters = inspect.signature(self.pipeline_class.__call__).parameters\n\n optional_parameters = set()\n\n for k, v in parameters.items():\n if v.default != inspect._empty:\n optional_parameters.add(k)\n\n parameters = set(parameters.keys())\n parameters.remove(\"self\")\n parameters.discard(\"kwargs\") # kwargs can be added if arguments of pipeline call function are deprecated\n\n remaining_required_parameters = set()\n\n for param in self.params:\n if param not in parameters:\n remaining_required_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_parameters) == 0,\n f\"Required parameters not present: {remaining_required_parameters}\",\n )\n\n remaining_required_optional_parameters = set()\n\n for param in self.required_optional_params:\n if param not in optional_parameters:\n remaining_required_optional_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_optional_parameters) == 0,\n f\"Required optional parameters not present: {remaining_required_optional_parameters}\",\n )\n\n def test_inference_batch_consistent(self, batch_sizes=[2]):\n self._test_inference_batch_consistent(batch_sizes=batch_sizes)\n\n def _test_inference_batch_consistent(\n self, batch_sizes=[2], additional_params_copy_to_batched_inputs=[\"num_inference_steps\"]\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # prepare batched inputs\n batched_inputs = []\n for batch_size in batch_sizes:\n batched_input = {}\n batched_input.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n # make unequal batch sizes\n 
batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n\n # make last batch super long\n batched_input[name][-1] = 100 * \"very long\"\n\n else:\n batched_input[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_input[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_input[\"batch_size\"] = batch_size\n\n batched_inputs.append(batched_input)\n\n logger.setLevel(level=diffusers.logging.WARNING)\n for batch_size, batched_input in zip(batch_sizes, batched_inputs):\n output = pipe(**batched_input)\n assert len(output[0]) == batch_size\n\n def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):\n self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)\n\n def _test_inference_batch_single_identical(\n self,\n batch_size=2,\n expected_max_diff=1e-4,\n additional_params_copy_to_batched_inputs=[\"num_inference_steps\"],\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for components in pipe.components.values():\n if hasattr(components, \"set_default_attn_processor\"):\n components.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is has been used in self.get_dummy_inputs\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # batchify inputs\n batched_inputs = {}\n batched_inputs.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n batched_inputs[name][-1] = 100 * \"very long\"\n\n else:\n batched_inputs[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_inputs[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_inputs[\"batch_size\"] = batch_size\n\n for arg in additional_params_copy_to_batched_inputs:\n batched_inputs[arg] = inputs[arg]\n\n output = pipe(**inputs)\n output_batch = pipe(**batched_inputs)\n\n assert output_batch[0].shape[0] == batch_size\n\n max_diff = np.abs(output_batch[0][0] - output[0][0]).max()\n assert max_diff < expected_max_diff\n\n def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n output = pipe(**self.get_dummy_inputs(generator_device))[0]\n output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_components_function(self):\n init_components = self.get_dummy_components()\n init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))}\n\n pipe = self.pipeline_class(**init_components)\n\n self.assertTrue(hasattr(pipe, \"components\"))\n self.assertTrue(set(pipe.components.keys()) == 
set(init_components.keys()))\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_float16_inference(self, expected_max_diff=5e-2):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n components = self.get_dummy_components()\n pipe_fp16 = self.pipeline_class(**components)\n for component in pipe_fp16.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe_fp16.to(torch_device, torch.float16)\n pipe_fp16.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in inputs:\n inputs[\"generator\"] = self.get_generator(0)\n\n output = pipe(**inputs)[0]\n\n fp16_inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in fp16_inputs:\n fp16_inputs[\"generator\"] = self.get_generator(0)\n\n output_fp16 = pipe_fp16(**fp16_inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()\n self.assertLess(max_diff, expected_max_diff, \"The outputs of the fp16 and fp32 pipelines are too different.\")\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_save_load_float16(self, expected_max_diff=1e-2):\n components = self.get_dummy_components()\n for name, module in components.items():\n if hasattr(module, \"half\"):\n components[name] = module.to(torch_device).half()\n\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for name, component in pipe_loaded.components.items():\n if hasattr(component, \"dtype\"):\n self.assertTrue(\n component.dtype == torch.float16,\n f\"`{name}.dtype` switched from `float16` to {component.dtype} after loading.\",\n )\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(\n max_diff, expected_max_diff, \"The output of the fp16 pipeline changed after saving and loading.\"\n )\n\n def test_save_load_optional_components(self, expected_max_difference=1e-4):\n if not hasattr(self.pipeline_class, \"_optional_components\"):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n # set all optional components to None\n for optional_component in 
pipe._optional_components:\n setattr(pipe, optional_component, None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(pipe_loaded, optional_component) is None,\n f\"`{optional_component}` did not stay set to None after loading.\",\n )\n\n inputs = self.get_dummy_inputs(generator_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"CUDA and CPU are required to switch devices\")\n def test_to_device(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n pipe.to(\"cpu\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cpu\" for device in model_devices))\n\n output_cpu = pipe(**self.get_dummy_inputs(\"cpu\"))[0]\n self.assertTrue(np.isnan(output_cpu).sum() == 0)\n\n pipe.to(\"cuda\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cuda\" for device in model_devices))\n\n output_cuda = pipe(**self.get_dummy_inputs(\"cuda\"))[0]\n self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)\n\n def test_to_dtype(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))\n\n pipe.to(torch_dtype=torch.float16)\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))\n\n def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3):\n self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff)\n\n def _test_attention_slicing_forward_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3\n ):\n if not self.test_attention_slicing:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_slicing = pipe(**inputs)[0]\n\n pipe.enable_attention_slicing(slice_size=1)\n inputs = self.get_dummy_inputs(generator_device)\n output_with_slicing = pipe(**inputs)[0]\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max()\n self.assertLess(max_diff, expected_max_diff, \"Attention slicing 
should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_slicing[0], output_without_slicing[0])\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.14.0` or higher\",\n )\n def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_sequential_cpu_offload()\n\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.17.0` or higher\",\n )\n def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):\n generator_device = \"cpu\"\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_model_cpu_offload()\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_xformers_available(),\n reason=\"XFormers attention is only available with CUDA and `xformers` installed\",\n )\n def test_xformers_attention_forwardGenerator_pass(self):\n self._test_xformers_attention_forwardGenerator_pass()\n\n def _test_xformers_attention_forwardGenerator_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4\n ):\n if not self.test_xformers_attention:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_without_offload = pipe(**inputs)[0]\n output_without_offload = (\n output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload\n )\n\n pipe.enable_xformers_memory_efficient_attention()\n inputs = self.get_dummy_inputs(torch_device)\n output_with_offload = pipe(**inputs)[0]\n output_with_offload = (\n output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload\n )\n\n 
if test_max_difference:\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"XFormers attention should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])\n\n def test_progress_bar(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n\n inputs = self.get_dummy_inputs(torch_device)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n stderr = stderr.getvalue()\n # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,\n # so we just match \"5\" in \"#####| 1/5 [00:01<00:00]\"\n max_steps = re.search(\"/(.*?) \", stderr).group(1)\n self.assertTrue(max_steps is not None and len(max_steps) > 0)\n self.assertTrue(\n f\"{max_steps}/{max_steps}\" in stderr, \"Progress bar should be enabled and stopped at the max step\"\n )\n\n pipe.set_progress_bar_config(disable=True)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n self.assertTrue(stderr.getvalue() == \"\", \"Progress bar should be disabled\")\n\n def test_num_images_per_prompt(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"num_images_per_prompt\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n batch_sizes = [1, 2]\n num_images_per_prompts = [1, 2]\n\n for batch_size in batch_sizes:\n for num_images_per_prompt in num_images_per_prompts:\n inputs = self.get_dummy_inputs(torch_device)\n\n for key in inputs.keys():\n if key in self.batch_params:\n inputs[key] = batch_size * [inputs[key]]\n\n images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]\n\n assert images.shape[0] == batch_size * num_images_per_prompt\n\n def test_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n\n inputs[\"guidance_scale\"] = 1.0\n out_no_cfg = pipe(**inputs)[0]\n\n inputs[\"guidance_scale\"] = 7.5\n out_cfg = pipe(**inputs)[0]\n\n assert out_cfg.shape == out_no_cfg.shape" }, { "identifier": "assert_mean_pixel_difference", "path": "llmga/diffusers/tests/pipelines/test_pipelines_common.py", "snippet": "def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10):\n image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32)\n expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32)\n avg_diff = np.abs(image - expected_image).mean()\n assert avg_diff < expected_max_diff, f\"Error image deviates {avg_diff} pixels on average\"" } ]
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DDPMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    StableDiffusionPix2PixZeroPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    load_pt,
    nightly,
    require_torch_gpu,
    skip_mps,
    torch_device,
)
from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)
11,361
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = sd_pipe.invert(**inputs).images image_slice = image[1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.6046, 0.5400, 0.4902, 0.4448, 0.4694, 0.5498, 0.4857, 0.5073, 0.5089]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 0.4940]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDPMScheduler() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self): device = torch_device 
components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`") max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images out_input_pil = sd_pipe.invert( **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil") ).images max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. enable_full_determinism() @skip_mps class StableDiffusionPix2PixZeroPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPix2PixZeroPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"image"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def setUpClass(cls): cls.source_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/src_emb_0.pt" ) cls.target_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/tgt_emb_0.pt" ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() inverse_scheduler = DDIMInverseScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "inverse_scheduler": inverse_scheduler, "caption_generator": None, "caption_processor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "cross_attention_guidance_amount": 0.15, "source_embeds": self.source_embeds, "target_embeds": self.target_embeds, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): dummy_image = floats_tensor((2, 3, 32, 32), rng=random.Random(seed)).to(torch_device) dummy_image = dummy_image / 2 + 0.5 generator = torch.manual_seed(seed) inputs = { "prompt": [ "A painting of a squirrel eating a burger", "A painting of a burger eating a squirrel", ], "image": dummy_image.cpu(), "num_inference_steps": 2, "guidance_scale": 6.0, "generator": generator, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = 
self.get_dummy_inversion_inputs(device, seed) if input_image_type == "pt": image = inputs["image"] elif input_image_type == "np": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) elif input_image_type == "pil": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) image = VaeImageProcessor.numpy_to_pil(image) else: raise ValueError(f"unsupported input_image_type {input_image_type}") inputs["image"] = image inputs["output_type"] = output_type return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_stable_diffusion_pix2pix_zero_inversion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) inputs["image"] = inputs["image"][:1] inputs["prompt"] = inputs["prompt"][:1] image = sd_pipe.invert(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4732, 0.4630, 0.5722, 0.5103, 0.5140, 0.5622, 0.5104, 0.5390, 0.5020]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = sd_pipe.invert(**inputs).images image_slice = image[1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.6046, 0.5400, 0.4902, 0.4448, 0.4694, 0.5498, 0.4857, 0.5073, 0.5089]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 
0.4940]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDPMScheduler() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`") max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images 
out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images out_input_pil = sd_pipe.invert( **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil") ).images max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
assert_mean_pixel_difference(out_input_pil, out_input_np, expected_max_diff=1)
5
2023-11-27 18:46:55+00:00
16k
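gold_snippet_index looks like an index into the context list: in the LLMGA record above it is 5, and context[5] is assert_mean_pixel_difference, which is exactly what the gold next_line calls. That reading is inferred from this example rather than documented anywhere in the dump, so the helper below is only a hedged convenience for inspecting which context entry a record's gold line depends on.

# Hedged helper: fetch the context entry referenced by gold_snippet_index.
# Assumes the list-of-dicts layout of `context` visible in this dump; the
# semantics of the index are inferred from the record above, not documented.
def gold_context_entry(record: dict) -> dict:
    return record["context"][record["gold_snippet_index"]]

# For the LLMGA record this returns the entry whose identifier is
# "assert_mean_pixel_difference", matching the call in its next_line.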
sherwinbahmani/4dfy
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference']\n finite_difference_normal_eps: float = 0.01\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, 
self.cfg.mlp_network_config\n )\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n mesh = trimesh.load(mesh_path)\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), 
dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if self.cfg.normal_type == \"finite_difference\":\n eps = self.cfg.finite_difference_normal_eps\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 
6 1\"] = self.forward_sdf(points_offset)\n normal = (\n 0.5 * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0]) / eps\n )\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if 
self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n self.density_blob_std = self.cfg.density_blob_std\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if self.cfg.normal_type == \"finite_difference\":\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n \n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if (\n global_step >= min_step\n and global_step <= max_step\n ): \n end_val = 
self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step)*(end_val - start_val)/(max_step - min_step)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, 
idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: 
{}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n vn_idx[i] = self.t_nrm_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = 
pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with 
torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config)\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
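The scale_tensor helper at the tail of this context list is what the cropped source further down uses (scale_tensor(mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox)) to map extracted mesh vertices from the tetrahedra helper's point range into the isosurface bounding box. Below is a quick standalone check of that rescaling arithmetic; the function name rescale, the example ranges, and the input values are illustration-only assumptions, and the original snippet's optional-range/Tensor handling is deliberately dropped.

import torch

def rescale(dat, inp_scale, tgt_scale):
    # same arithmetic as the scale_tensor snippet above, minus the None/Tensor-range handling
    dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
    return dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]

v = torch.tensor([0.0, 0.25, 1.0])
print(rescale(v, (0.0, 1.0), (-1.0, 1.0)))  # tensor([-1.0000, -0.5000,  1.0000])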
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
12,966
isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
2
2023-11-29 05:15:56+00:00
16k
rlawjdghek/StableVITON
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.lossconfig = lossconfig\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = torch.nn.Identity()\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n def init_loss(self):\n self.loss = instantiate_from_config(self.lossconfig)\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx):\n real_img = self.get_input(batch, self.image_key)\n recon, posterior = self(real_img)\n loss = self.loss(real_img, recon, posterior)\n return loss\n \n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.decoder.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec 
= self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n print(f\"beta scheduler name : {schedule}\")\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, 
attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates, cond_output_dict = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates, cond_output_dict\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0, cond_output_dict = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n if cond_output_dict is not None:\n cond_output = cond_output_dict[\"cond_output\"] \n if self.model.use_noisy_cond:\n b = cond_output.shape[0]\n\n alphas = self.model.alphas_cumprod if ddim_use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if ddim_use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if ddim_use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if ddim_use_original_steps else self.ddim_sigmas\n\n device = cond_output.device\n a_t = torch.full((b, 1, 1, 1), alphas[0], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[0], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[0], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[0], device=device)\n\n c = cond_output_dict[\"cond_input\"]\n e_t = cond_output\n pred_c0 = (c - sqrt_one_minus_at * e_t) / a_t.sqrt()\n dir_ct = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(c.shape, device, False) * temperature\n\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n cond_output = a_prev.sqrt() * pred_c0 + dir_ct + noise \n cond_output_dict[f\"cond_sample\"] = cond_output\n return img, intermediates, cond_output_dict\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output, cond_output_dict = self.model.apply_model(x, t, c)\n else:\n # x_in = torch.cat([x] * 2)\n # t_in = torch.cat([t] * 2)\n # if isinstance(c, dict):\n # assert isinstance(unconditional_conditioning, dict)\n # c_in = dict()\n # for k in c:\n # if isinstance(c[k], list):\n # c_in[k] = [torch.cat([\n # unconditional_conditioning[k][i],\n # c[k][i]]) for i in range(len(c[k]))]\n # else:\n # c_in[k] = torch.cat([\n # unconditional_conditioning[k],\n # c[k]])\n # elif isinstance(c, list):\n # c_in = list()\n # assert isinstance(unconditional_conditioning, list)\n # for i in range(len(c)):\n # c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n # else:\n # c_in = torch.cat([unconditional_conditioning, c])\n x_in = x\n t_in = t\n model_t, cond_output_dict_cond = self.model.apply_model(x_in, t_in, c)\n model_uncond, cond_output_dict_uncond = self.model.apply_model(x_in, t_in, unconditional_conditioning)\n if isinstance(model_t, tuple):\n model_t, _ = model_t\n if isinstance(model_uncond, tuple):\n model_uncond, _ = model_uncond\n if cond_output_dict_cond is not None:\n cond_output_dict = dict()\n for k in cond_output_dict_cond.keys():\n cond_output_dict[k] = torch.cat([cond_output_dict_uncond[k], cond_output_dict_cond[k]])\n else:\n cond_output_dict = None\n # model_output, cond_output_dict = self.model.apply_model(x_in, t_in, c_in)\n # model_uncond, model_t = model_output.chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = 
self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0, cond_output_dict\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)[0]\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)[0]\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torchvision.transforms as T import random import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from torchvision.transforms.functional import resize from diffusers.models.autoencoder_kl import AutoencoderKLOutput from diffusers.models.vae import DecoderOutput from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd from ldm.models.diffusion.ddim import DDIMSampler
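Several helpers imported above (extract_into_tensor, noise_like) are used on almost every line of the schedule and sampling code that follows. For reference, they are commonly implemented roughly as below; this is a sketch from memory of the CompVis-style utilities, not a verbatim copy of ldm/modules/diffusionmodules/util.py:

import torch

def extract_into_tensor(a, t, x_shape):
    # pick the per-timestep coefficient a[t] and reshape it so it broadcasts over x
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

def noise_like(shape, device, repeat=False):
    # optionally reuse one noise sample for the whole batch (repeat_noise=True)
    if repeat:
        return torch.randn((1, *shape[1:]), device=device).repeat(
            shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)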
12922
else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]:
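The p_sample_loop in the cropped_code above supports mask-guided (inpainting-style) sampling: at every step the known region of x0 is re-noised to the current timestep and pasted back over the running sample. A minimal sketch of that blend, assuming mask is 1 where the original content must be kept and using the usual forward noising x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * eps:

import torch

def masked_step(img, x0, mask, sqrt_ab_t, sqrt_one_minus_ab_t):
    # re-noise the known content to the current noise level (q_sample) ...
    img_orig = sqrt_ab_t * x0 + sqrt_one_minus_ab_t * torch.randn_like(x0)
    # ... then keep it wherever mask == 1 and let the model fill in the rest
    return img_orig * mask + (1. - mask) * img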
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, l_cond_simple_weight=1.0, l_cond_recon_weight=1.0, **kwargs ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.unet_config = unet_config self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.l_cond_simple_weight = l_cond_simple_weight self.l_cond_recon_weight = l_cond_recon_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}_loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}_loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}_loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): self.batch = batch for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.kwargs = kwargs self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std self.cond_stage_trainable = cond_stage_trainable assert self.num_timesteps_cond <= 
kwargs['timesteps'] if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None if self.kwargs["use_imageCLIP"]: self.proj_out = nn.Linear(1024, 768) else: self.proj_out = None if self.use_pbe_weight: print("learnable vector gene") self.learnable_vector = nn.Parameter(torch.randn((1,1,768)), requires_grad=True) else: self.learnable_vector = None if self.kwargs["use_lastzc"]: # deprecated self.lastzc = zero_module(conv_nd(2, 4, 4, 1, 1, 0)) else: self.lastzc = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None else: model = instantiate_from_config(config) self.cond_stage_model = model else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior elif isinstance(encoder_posterior, AutoencoderKLOutput): z = encoder_posterior.latent_dist.sample() else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = 
torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False, no_latent=False, is_controlnet=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) if no_latent: _,_,h,w = x.shape x = resize(x, (h//8, w//8)) return [x, None] encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if is_controlnet and self.lastzc is not None: z = self.lastzc(z) if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc 
= super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc else: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) c = c.float() else: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z output = self.first_stage_model.decode(z) if not isinstance(output, DecoderOutput): return output else: return output.sample def decode_first_stage_train(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): if not self.use_pbe_weight: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) # pbe negative condition else: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() self.u_cond_prop=random.uniform(0, 1) c["c_crossattn"] = [self.get_learned_conditioning(c["c_crossattn"])] if self.u_cond_prop < self.u_cond_percent: c["c_crossattn"] = [self.learnable_vector.repeat(x.shape[0],1,1)] return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, 
x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): loss_dict = {} noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output, cond_output_dict = self.apply_model(x_noisy, t, cond) prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() model_loss = None if isinstance(model_output, tuple): model_output, model_loss = model_output if self.only_agn_simple_loss: _, _, l_h, l_w = model_output.shape m_agn = F.interpolate(super().get_input(self.batch, "agn_mask"), (l_h, l_w)) loss_simple = self.get_loss(model_output * (1-m_agn), target * (1-m_agn), mean=False).mean([1, 2, 3]) else: loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() if self.original_elbo_weight != 0: loss_dict.update({f'loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) if model_loss is not None: loss += model_loss loss_dict.update({f"model loss" : model_loss}) loss_dict.update({f'{prefix}_loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out, cond_output_dict = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if isinstance(model_out, tuple): model_out, _ = model_out if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]:
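Besides eps- and x0-prediction, the DDPM code above supports the v-parameterization, v = sqrt(alphabar_t) * eps - sqrt(1 - alphabar_t) * x_0 (see get_v). A small numeric sketch with made-up tensors that checks the recovery identities implemented by predict_start_from_z_and_v and predict_eps_from_z_and_v:

import torch

sqrt_ab = torch.rand(1)                  # stands in for sqrt(alphas_cumprod[t])
sqrt_1m_ab = (1. - sqrt_ab ** 2).sqrt()  # sqrt(1 - alphas_cumprod[t])

x0 = torch.randn(2, 3, 8, 8)
eps = torch.randn_like(x0)

z = sqrt_ab * x0 + sqrt_1m_ab * eps      # q_sample: noisy latent
v = sqrt_ab * eps - sqrt_1m_ab * x0      # get_v

x0_rec = sqrt_ab * z - sqrt_1m_ab * v    # predict_start_from_z_and_v
eps_rec = sqrt_ab * v + sqrt_1m_ab * z   # predict_eps_from_z_and_v

assert torch.allclose(x0_rec, x0, atol=1e-5)
assert torch.allclose(eps_rec, eps, atol=1e-5)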
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
0
2023-12-02 05:56:58+00:00
16k
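Assuming the records of this dump are serialized as JSON lines with the fields listed at the top (repo_name, file_path, context, import_statement, cropped_code, next_line, ...), and given a hypothetical predict_next_line function standing in for whatever model is being evaluated, a next-line exact-match check over them could look like this sketch:

import json

def exact_match(dump_path, predict_next_line):
    # score next-line predictions against the gold next_line field
    hits = total = 0
    with open(dump_path, encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            prompt = record["import_statement"] + "\n" + record["cropped_code"]
            pred = predict_next_line(prompt, record["context"])
            hits += int(pred.strip() == record["next_line"].strip())
            total += 1
    return hits / max(total, 1)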
AIFSH/NativeSpeaker
src/core.py
[ { "identifier": "HandleLog", "path": "src/log_helper.py", "snippet": "class HandleLog:\n \"\"\"\n 先创建日志记录器(logging.getLogger),然后再设置日志级别(logger.setLevel),\n 接着再创建日志文件,也就是日志保存的地方(logging.FileHandler),然后再设置日志格式(logging.Formatter),\n 最后再将日志处理程序记录到记录器(addHandler)\n \"\"\"\n\n def __init__(self):\n self.__now_time = datetime.now().strftime('%Y-%m-%d') # 当前日期格式化\n self.__all_log_path = os.path.join(log_path, self.__now_time + \"-all\" + \".log\") # 收集所有日志信息文件\n self.__error_log_path = os.path.join(log_path, self.__now_time + \"-error\" + \".log\") # 收集错误日志信息文件\n self.__logger = logging.getLogger() # 创建日志记录器\n self.__logger.setLevel(logging.DEBUG) # 设置默认日志记录器记录级别\n\n @staticmethod\n def __init_logger_handler(log_path):\n \"\"\"\n 创建日志记录器handler,用于收集日志\n :param log_path: 日志文件路径\n :return: 日志记录器\n \"\"\"\n # 写入文件,如果文件超过1M大小时,切割日志文件,仅保留3个文件\n logger_handler = RotatingFileHandler(filename=log_path, maxBytes=1 * 1024 * 1024, backupCount=3, encoding='utf-8')\n return logger_handler\n\n @staticmethod\n def __init_console_handle():\n \"\"\"创建终端日志记录器handler,用于输出到控制台\"\"\"\n console_handle = colorlog.StreamHandler()\n return console_handle\n\n def __set_log_handler(self, logger_handler, level=logging.DEBUG):\n \"\"\"\n 设置handler级别并添加到logger收集器\n :param logger_handler: 日志记录器\n :param level: 日志记录器级别\n \"\"\"\n logger_handler.setLevel(level=level)\n self.__logger.addHandler(logger_handler)\n\n def __set_color_handle(self, console_handle):\n \"\"\"\n 设置handler级别并添加到终端logger收集器\n :param console_handle: 终端日志记录器\n :param level: 日志记录器级别\n \"\"\"\n console_handle.setLevel(logging.DEBUG)\n self.__logger.addHandler(console_handle)\n\n @staticmethod\n def __set_color_formatter(console_handle, color_config):\n \"\"\"\n 设置输出格式-控制台\n :param console_handle: 终端日志记录器\n :param color_config: 控制台打印颜色配置信息\n :return:\n \"\"\"\n formatter = colorlog.ColoredFormatter(default_formats[\"color_format\"], log_colors=color_config)\n console_handle.setFormatter(formatter)\n\n @staticmethod\n def __set_log_formatter(file_handler):\n \"\"\"\n 设置日志输出格式-日志文件\n :param file_handler: 日志记录器\n \"\"\"\n formatter = logging.Formatter(default_formats[\"log_format\"], datefmt='%a, %d %b %Y %H:%M:%S')\n file_handler.setFormatter(formatter)\n\n @staticmethod\n def __close_handler(file_handler):\n \"\"\"\n 关闭handler\n :param file_handler: 日志记录器\n \"\"\"\n file_handler.close()\n\n def __console(self, level, message):\n \"\"\"构造日志收集器\"\"\"\n all_logger_handler = self.__init_logger_handler(self.__all_log_path) # 创建日志文件\n error_logger_handler = self.__init_logger_handler(self.__error_log_path)\n console_handle = self.__init_console_handle()\n\n self.__set_log_formatter(all_logger_handler) # 设置日志格式\n self.__set_log_formatter(error_logger_handler)\n self.__set_color_formatter(console_handle, log_colors_config)\n\n self.__set_log_handler(all_logger_handler) # 设置handler级别并添加到logger收集器\n self.__set_log_handler(error_logger_handler, level=logging.ERROR)\n self.__set_color_handle(console_handle)\n\n if level == 'info':\n self.__logger.info(message)\n elif level == 'debug':\n self.__logger.debug(message)\n elif level == 'warning':\n self.__logger.warning(message)\n elif level == 'error':\n self.__logger.error(message)\n elif level == 'critical':\n self.__logger.critical(message)\n\n self.__logger.removeHandler(all_logger_handler) # 避免日志输出重复问题\n self.__logger.removeHandler(error_logger_handler)\n self.__logger.removeHandler(console_handle)\n\n self.__close_handler(all_logger_handler) # 关闭handler\n self.__close_handler(error_logger_handler)\n\n def debug(self, 
message):\n self.__console('debug', message)\n\n def info(self, message):\n self.__console('info', message)\n\n def warning(self, message):\n self.__console('warning', message)\n\n def error(self, message):\n self.__console('error', message)\n\n def critical(self, message):\n self.__console('critical', message)" }, { "identifier": "AudioProcess", "path": "src/audio_bgm_split.py", "snippet": "class AudioProcess:\n def __init__(self, agg, is_half=False, tta=False):\n\n # model_path = os.path.join('weights', 'HP5-主旋律人声vocals+其他instrumentals.pth')\n model_path = load_file_from_url(url=\"https://hf-mirror.com/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-%E4%B8%BB%E6%97%8B%E5%BE%8B%E4%BA%BA%E5%A3%B0vocals%2B%E5%85%B6%E4%BB%96instrumentals.pth?download=true\", \n model_dir='weights', progress=True, file_name=\"HP5-主旋律人声vocals+其他instrumentals.pth\")\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.data = {\n # Processing Options\n \"postprocess\": False,\n \"tta\": tta,\n # Constants\n \"window_size\": 512,\n \"agg\": agg,\n \"high_end_process\": \"mirroring\",\n }\n mp = ModelParameters(\"src/third_part/uvr5_pack/lib_v5/modelparams/4band_v2.json\")\n model = Nets.CascadedASPPNet(mp.param[\"bins\"] * 2)\n cpk = torch.load(model_path, map_location=\"cpu\")\n model.load_state_dict(cpk)\n model.eval()\n if is_half:\n model = model.half().to(self.device)\n else:\n model = model.to(self.device)\n\n self.mp = mp\n self.model = model\n\n def split(self, music_file):\n \n X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}\n bands_n = len(self.mp.param[\"band\"])\n # print(bands_n)\n for d in range(bands_n, 0, -1):\n bp = self.mp.param[\"band\"][d]\n if d == bands_n: # high-end band\n (\n X_wave[d],\n _,\n ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑\n path=music_file,\n sr=bp[\"sr\"],\n mono=False,\n dtype=np.float32,\n res_type=bp[\"res_type\"],\n )\n if X_wave[d].ndim == 1:\n X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])\n else: # lower bands\n X_wave[d] = librosa.core.resample(\n y=X_wave[d + 1],\n orig_sr=self.mp.param[\"band\"][d + 1][\"sr\"],\n target_sr=bp[\"sr\"],\n res_type=bp[\"res_type\"],\n )\n # Stft of wave source\n X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(\n X_wave[d],\n bp[\"hl\"],\n bp[\"n_fft\"],\n self.mp.param[\"mid_side\"],\n self.mp.param[\"mid_side_b2\"],\n self.mp.param[\"reverse\"],\n )\n # pdb.set_trace()\n if d == bands_n and self.data[\"high_end_process\"] != \"none\":\n input_high_end_h = (bp[\"n_fft\"] // 2 - bp[\"crop_stop\"]) + (\n self.mp.param[\"pre_filter_stop\"] - self.mp.param[\"pre_filter_start\"]\n )\n input_high_end = X_spec_s[d][\n :, bp[\"n_fft\"] // 2 - input_high_end_h : bp[\"n_fft\"] // 2, :\n ]\n\n X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)\n aggresive_set = float(self.data[\"agg\"] / 100)\n aggressiveness = {\n \"value\": aggresive_set,\n \"split_bin\": self.mp.param[\"band\"][1][\"crop_stop\"],\n }\n with torch.no_grad():\n pred, X_mag, X_phase = inference(\n X_spec_m, self.device, self.model, aggressiveness, self.data\n )\n # Postprocess\n if self.data[\"postprocess\"]:\n pred_inv = np.clip(X_mag - pred, 0, np.inf)\n pred = spec_utils.mask_silence(pred, pred_inv)\n y_spec_m = pred * X_phase\n v_spec_m = X_spec_m - y_spec_m\n\n \n if self.data[\"high_end_process\"].startswith(\"mirroring\"):\n input_high_end_y = spec_utils.mirroring(\n self.data[\"high_end_process\"], y_spec_m, input_high_end, self.mp\n )\n wav_instrument = spec_utils.cmb_spectrogram_to_wave(\n 
y_spec_m, self.mp, input_high_end_h, input_high_end_y\n )\n \n input_high_end_v = spec_utils.mirroring(\n self.data[\"high_end_process\"], v_spec_m, input_high_end, self.mp\n )\n wav_vocals = spec_utils.cmb_spectrogram_to_wave(\n v_spec_m, self.mp, input_high_end_h, input_high_end_v\n )\n \n else:\n wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)\n wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)\n \n logger.info(\"vocal and instruments split done\")\n \n temp_manager = TempFileManager()\n voice_temp_file = temp_manager.create_temp_file(suffix='.wav')\n noise_temp_file = temp_manager.create_temp_file(suffix='.wav')\n \n sf.write(\n voice_temp_file,\n (np.array(wav_vocals) * 32768).astype(\"int16\"),\n self.mp.param[\"sr\"],\n )\n sf.write(\n noise_temp_file,\n (np.array(wav_instrument) * 32768).astype(\"int16\"),\n self.mp.param[\"sr\"],\n )\n return voice_temp_file.name, noise_temp_file.name" }, { "identifier": "VoiceCloner", "path": "src/voice_clone.py", "snippet": "class VoiceCloner:\n\n def __init__(self, version_name=\"v2.0.3\") -> None:\n self.temp_manager = TempFileManager()\n root_path = os.path.join('weights',f\"xtts_{version_name}\")\n config_path = load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/config.json?download=true\",\n model_dir=root_path,\n file_name=\"config.json\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/model.pth?download=true\",\n model_dir=root_path,\n file_name=\"model.pth\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/vocab.json?download=true\",\n model_dir=root_path,\n file_name=\"vocab.json\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/hash.md5?download=true\",\n model_dir=root_path,\n file_name=\"hash.md5\")\n # model_path = f\"{root_path}/model.pth\"\n # logger.info(f'model_path:{model_path}')\n self.tts = TTS(model_path=root_path,config_path=config_path,gpu=True)\n \n def __call__(self, text, lang_code, speaker_wav,speed=1.0,*args: Any, **kwds: Any) -> Any:\n temp_file = self.temp_manager.create_temp_file(suffix='.wav').name\n self.tts.tts_to_file(text=text,\n language=lang_code,\n speaker_wav=speaker_wav,\n speed=speed,\n file_path=temp_file)\n return temp_file" }, { "identifier": "TempFileManager", "path": "src/temp_manager.py", "snippet": "class TempFileManager:\n _instance = None\n temp_files = []\n\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n atexit.register(cls.cleanup)\n return cls._instance\n\n def create_temp_file(self, suffix):\n temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)\n self.temp_files.append(temp_file.name)\n return temp_file\n\n @classmethod\n def cleanup(cls):\n for temp_file in cls.temp_files:\n try:\n # Remove the temporary file\n os.remove(temp_file)\n except OSError:\n pass" }, { "identifier": "Translator", "path": "src/translator.py", "snippet": "class Translator:\n\n def __init__(self,work_with_human=False) -> None:\n # _ = ts.preaccelerate_and_speedtest()\n self.work_with_human = work_with_human\n\n def __call__(self,text,from_lang,to_lang,*args: Any, **kwds: Any) -> Any:\n assert from_lang != to_lang,\"same lang code error,translator only work in language to another language\"\n if self.work_with_human:\n lience = input(\"!!!注意,出现这个提示是因为您自行修改了相关代码,请不要做偏离原文内容的手工翻译,否则后果自负,与该项目开源作者无关!我已经阅读并同意该声明。\\n(!!!Attention!This prompt appears because you modified the code 
yourself,Please do not deviate from the original content of manual translation, or bear the consequences,It has nothing to do with the author of this project! I have read and agree with the statement)\\t yes | no:\\n\").strip()\n if \"y\" not in lience:\n self.work_with_human = False\n \n if \"zh\" in to_lang:\n to_lang = \"zh\"\n logger.info(f\"{from_lang} {to_lang} {text} \")\n try:\n dst_text = ts.translate_text(query_text=text,translator=\"qqTranSmart\",\n from_language=from_lang,to_language=to_lang)\n except ts.server.TranslatorError:\n dst_text = input(\"translator failed,input by self:\")\n dst_text = dst_text.strip()\n return dst_text\n logger.info(\"dst_text:{}\".format(dst_text))\n if self.work_with_human:\n if_by_hand = input(\"translate by hand? 1 by hand, 0 pass:\\t\")\n if if_by_hand == \"1\":\n dst_text = input(\"input by hand:\\n\").strip()\n logger.info(f\"dst_text edited:{dst_text}\")\n\n return dst_text" }, { "identifier": "LipSync", "path": "src/lipsync.py", "snippet": "class LipSync:\n def __init__(self,model_name) -> None:\n self.model_name = model_name\n self.img_size = 96\n self.static = False\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.face_det_batch_size = 16\n self.wav2lip_batch_size = 16\n self.mel_step_size = 16\n self.pads = [0,20,0,0]\n self.nosmooth = True\n self.box = [-1, -1, -1, -1]\n self.fps = 25\n self.resize_factor = 2\n self.rotate = False\n self.crop = [0, -1, 0, -1]\n logger.info('Using {} for inference.'.format(self.device))\n\n load_file_from_url(url=\"https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth\",\n model_dir=\"src/third_part/wav2lip/face_detection/detection/sfd\",\n file_name=\"s3fd.pth\")\n load_file_from_url(url=\"https://hf-mirror.com/MarjorieSaul/wav2lip_sd_models/resolve/main/wav2lip.pth?download=true\",\n model_dir=\"weights\",\n file_name=\"wav2lip.pth\")\n load_file_from_url(url=\"https://hf-mirror.com/MarjorieSaul/wav2lip_sd_models/resolve/main/wav2lip_gan.pth?download=true\",\n model_dir=\"weights\",\n file_name=\"wav2lip_gan.pth\")\n self.tmp_manager = TempFileManager()\n \n\n def __call__(self, face,audio,outfile,voice,*args: Any, **kwds: Any) -> Any:\n if os.path.isfile(face) and face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n self.static = True\n if not os.path.isfile(face):\n raise ValueError('face argument must be a valid path to video/image file')\n elif face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n full_frames = [cv2.imread(face)]\n fps = self.fps\n else:\n video_stream = cv2.VideoCapture(face)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n logger.info('Reading video frames...')\n\n full_frames = []\n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break\n if self.resize_factor > 1:\n frame = cv2.resize(frame, (frame.shape[1]//self.resize_factor, frame.shape[0]//self.resize_factor))\n\n if self.rotate:\n frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)\n \n y1, y2, x1, x2 = self.crop\n if x2 == -1: x2 = frame.shape[1]\n if y2 == -1: y2 = frame.shape[0]\n frame = frame[y1:y2, x1:x2]\n full_frames.append(frame)\n\n logger.info(\"Number of frames available for inference: \"+str(len(full_frames)))\n\n assert audio.endswith('.wav'),\"audio file shoud end with .wav\"\n\n wav = load_wav(audio, sr=16000)\n mel = melspectrogram(wav)\n\n if np.isnan(mel.reshape(-1)).sum() > 0:\n raise ValueError('Mel contains nan! Using a TTS voice? 
Add a small epsilon noise to the wav file and try again')\n \n mel_chunks = []\n mel_idx_multiplier = 80./fps\n i = 0\n while 1:\n start_idx = int(i * mel_idx_multiplier)\n if start_idx + self.mel_step_size > len(mel[0]):\n mel_chunks.append(mel[:, len(mel[0]) - self.mel_step_size:])\n break\n mel_chunks.append(mel[:, start_idx : start_idx + self.mel_step_size])\n i += 1\n \n logger.info(\"Length of mel chunks: {}\".format(len(mel_chunks)))\n\n full_frames = full_frames[:len(mel_chunks)]\n\n batch_size = self.wav2lip_batch_size\n\n gen = self.datagen(full_frames.copy(), mel_chunks)\n while 1:\n try:\n for i, (img_batch, mel_batch, frames, coords) in enumerate(tqdm(gen,\n total=int(np.ceil(float(len(mel_chunks))/batch_size)))):\n if i == 0:\n model = self.load_model()\n logger.info(\"Model loaded\")\n frame_h, frame_w = full_frames[0].shape[:-1]\n temp_file = self.tmp_manager.create_temp_file(suffix='.avi').name\n out = cv2.VideoWriter(temp_file, \n\t\t\t\t\t\t\t\t\tcv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))\n img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(self.device)\n mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(self.device)\n\n with torch.no_grad():\n pred = model(mel_batch, img_batch)\n pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.\n for p, f, c in zip(pred, frames, coords):\n y1, y2, x1, x2 = c\n try:\n p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))\n f[y1:y2, x1:x2] = p\n except cv2.error:\n pass\n out.write(f)\n out.release()\n except RuntimeError:\n if batch_size == 1: \n raise RuntimeError('Image too big to run wav2lip on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n continue\n break\n command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(voice, temp_file, outfile)\n subprocess.call(command, shell=platform.system() != 'Windows')\n \n def load_model(self):\n model = Wav2Lip()\n logger.info(\"Load checkpoint from: {}\".format(self.model_name))\n checkpoint = self._load()\n s = checkpoint[\"state_dict\"]\n new_s = {}\n for k, v in s.items():\n new_s[k.replace('module.', '')] = v\n model.load_state_dict(new_s)\n model = model.to(self.device)\n return model.eval()\n\n def _load(self):\n checkpoint_path = \"weights/{}.pth\".format(self.model_name)\n if self.device == 'cuda':\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n \n\n def datagen(self,frames, mels):\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n if self.box[0] == -1:\n if not self.static:\n face_det_results = self.face_detect(frames) # BGR2RGB for CNN face detection\n else:\n face_det_results = self.face_detect([frames[0]])\n else:\n logger.info('Using the specified bounding box instead of face detection...')\n y1, y2, x1, x2 = self.box\n face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]\n \n for i, m in enumerate(mels):\n idx = 0 if self.static else i%len(frames)\n frame_to_save = frames[idx].copy()\n face, coords = face_det_results[idx].copy()\n\n try:\n face = cv2.resize(face, (self.img_size, self.img_size))\n except cv2.error:\n face = np.zeros((10, 10,3), np.uint8)\n face = cv2.resize(face, (self.img_size, self.img_size))\n \n img_batch.append(face)\n mel_batch.append(m)\n frame_batch.append(frame_to_save)\n coords_batch.append(coords)\n\n if len(img_batch) >= self.wav2lip_batch_size:\n img_batch, mel_batch = np.asarray(img_batch), 
np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, self.img_size//2:] = 0\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n yield img_batch, mel_batch, frame_batch, coords_batch\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n \n \n if len(img_batch) > 0:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n img_masked = img_batch.copy()\n img_masked[:, self.img_size//2:] = 0\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n yield img_batch, mel_batch, frame_batch, coords_batch\n\n\n def face_detect(self,images):\n detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,\n flip_input=False,device=self.device\n )\n batch_size = self.face_det_batch_size\n while 1:\n predictions = []\n try:\n for i in tqdm(range(0,len(images),batch_size)):\n predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))\n except RuntimeError:\n if batch_size == 1: \n raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n logger.warning('Recovering from OOM error; New batch size: {}'.format(batch_size))\n continue\n break\n results = []\n pady1, pady2, padx1, padx2 = self.pads\n for rect, image in zip(predictions, images):\n if rect is None:\n rect = (0,20,0,0)\n y1 = max(0, rect[1] - pady1)\n y2 = min(image.shape[0], rect[3] + pady2)\n x1 = max(0, rect[0] - padx1)\n x2 = min(image.shape[1], rect[2] + padx2)\n results.append([x1,y1,x2,y2])\n boxes = np.array(results)\n if not self.nosmooth: boxes = self.get_smoothened_boxes(boxes, T=5)\n results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]\n import gc; gc.collect(); torch.cuda.empty_cache();del detector\n return results\n\n def get_smoothened_boxes(self,boxes, T):\n for i in range(len(boxes)):\n if i + T > len(boxes):\n window = boxes[len(boxes) - T:]\n else:\n window = boxes[i : i + T]\n boxes[i] = np.mean(window, axis=0)\n return boxes" }, { "identifier": "Upscale", "path": "src/upscale.py", "snippet": "class Upscale:\n def __init__(self,fidelity_weight=0.9) -> None:\n self.pretrain_model_url = {\n 'restoration': 'https://mirror.ghproxy.com/https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',\n }\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.use_half = True \n self.bg_tile = 400\n self.w = fidelity_weight\n self.bg_upsampler = 'realesrgan'\n self.face_upsample = True\n self.has_aligned = False\n self.detection_model = \"retinaface_resnet50\"\n self.upscale = 2\n self.only_center_face = False\n self.draw_box = False\n self.suffix = None\n\n\n def __call__(self,input_path:str,output_path:str,audio,*args: Any, **kwds: Any) -> Any:\n \n input_video = False\n if input_path.endswith(('jpg', 'jpeg', 'png', 'JPG', 'JPEG', 'PNG')): # input single img path\n input_img_list = [input_path]\n result_root = f'results/test_img_{self.w}'\n elif input_path.endswith(('mp4', 'mov', 'avi', 'MP4', 'MOV', 'AVI')): # input video path\n input_img_list = []\n vidreader = VideoReader(input_path)\n image = vidreader.get_frame()\n while image is not None:\n input_img_list.append(image)\n image = vidreader.get_frame()\n # audio = vidreader.get_audio()\n fps = vidreader.get_fps() \n 
video_name = os.path.basename(input_path)[:-4]\n result_root = f'results/{video_name}_{self.w}'\n input_video = True\n vidreader.close()\n else: # input img folder\n if input_path.endswith('/'): # solve when path ends with /\n input_path = input_path[:-1]\n # scan all the jpg and png images\n input_img_list = sorted(glob.glob(os.path.join(input_path, '*.[jpJP][pnPN]*[gG]')))\n result_root = f'results/{os.path.basename(input_path)}_{self.w}'\n \n if not output_path is None: # set output path\n result_root = output_path\n\n test_img_num = len(input_img_list)\n if test_img_num == 0:\n raise FileNotFoundError('No input image/video is found...\\n' \n '\\tNote that --input_path for video should end with .mp4|.mov|.avi')\n\n # ------------------ set up background upsampler ------------------\n if self.bg_upsampler == 'realesrgan':\n bg_upsampler = self.set_realesrgan()\n else:\n bg_upsampler = None\n \n # ------------------ set up face upsampler ------------------\n if self.face_upsample:\n if bg_upsampler is not None:\n face_upsampler = bg_upsampler\n else:\n face_upsampler = self.set_realesrgan()\n else:\n face_upsampler = None\n \n # ------------------ set up CodeFormer restorer -------------------\n net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, \n connect_list=['32', '64', '128', '256']).to(self.device)\n # ckpt_path = 'weights/CodeFormer/codeformer.pth'\n ckpt_path = load_file_from_url(url=self.pretrain_model_url['restoration'], \n model_dir='weights/CodeFormer', progress=True, file_name=None)\n checkpoint = torch.load(ckpt_path)['params_ema']\n net.load_state_dict(checkpoint)\n net.eval()\n\n # ------------------ set up FaceRestoreHelper -------------------\n # large det_model: 'YOLOv5l', 'retinaface_resnet50'\n # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'\n if not self.has_aligned: \n logger.info(f'Face detection model: {self.detection_model}')\n if bg_upsampler is not None: \n logger.info(f'Background upsampling: True, Face upsampling: {self.face_upsample}')\n else:\n logger.info(f'Background upsampling: False, Face upsampling: {self.face_upsample}')\n\n # -------------------- start to processing ---------------------\n logger.info(\"multi thread processing \")\n '''\n with ThreadPoolExecutor(max_workers=20) as executor:\n for i, img_path in enumerate(input_img_list):\n executor.submit(self.enhance_face,img_path,i,video_name,test_img_num,\n bg_upsampler,result_root,input_video,net,face_upsampler)\n '''\n Parallel(n_jobs=4)(delayed(self.enhance_face)(img_path,i,video_name,test_img_num,\\\n bg_upsampler,result_root,input_video,\\\n net,face_upsampler) for i,img_path in enumerate(input_img_list))\n\n # save enhanced video\n if input_video:\n logger.info('Video Saving...')\n # load images\n video_frames = []\n img_list = sorted(glob.glob(os.path.join(result_root, 'final_results', '*.[jp][pn]g')))\n for img_path in img_list:\n img = cv2.imread(img_path)\n video_frames.append(img)\n # write images to video\n height, width = video_frames[0].shape[:2]\n if self.suffix is not None:\n video_name = f'{video_name}_{self.suffix}.png'\n save_restore_path = os.path.join(result_root, f'{video_name}.avi')\n vidwriter = cv2.VideoWriter(save_restore_path,cv2.VideoWriter_fourcc(*'DIVX'),fps, (width, height))\n \n for f in tqdm(video_frames,desc=\"Combining png to avi...\",total=len(video_frames)):\n vidwriter.write(f)\n \n vidwriter.release()\n \n out_file = os.path.join(result_root, f'{video_name}.mp4')\n command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 
{}'.format(audio, save_restore_path, out_file)\n subprocess.call(command, shell=platform.system() != 'Windows')\n\n logger.info(f'\\nAll results are saved in {result_root}')\n\n def enhance_face(self,img_path,i,video_name,test_img_num,bg_upsampler,result_root,input_video,net,face_upsampler):\n # clean all the intermediate results to process the next image\n face_helper = FaceRestoreHelper(\n self.upscale,\n face_size=512,\n crop_ratio=(1, 1),\n det_model = self.detection_model,\n save_ext='png',\n use_parse=True,\n device=self.device)\n with num_lock:\n if isinstance(img_path, str):\n img_name = os.path.basename(img_path)\n basename, ext = os.path.splitext(img_name)\n logger.info(f'[{i+1}/{test_img_num}] Processing: {img_name}')\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n else: # for video processing\n basename = str(i).zfill(6)\n img_name = f'{video_name}_{basename}' if input_video else basename\n logger.info(f'[{i+1}/{test_img_num}] Processing: {img_name}')\n img = img_path\n\n if self.has_aligned: \n # the input faces are already cropped and aligned\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)\n # face_helper.is_gray = is_gray(img, threshold=10)\n if face_helper.is_gray:\n logger.info('Grayscale input: True')\n face_helper.cropped_faces = [img]\n else:\n face_helper.read_image(img)\n # get face landmarks for each face\n num_det_faces = face_helper.get_face_landmarks_5(\n only_center_face=self.only_center_face, resize=640, eye_dist_threshold=5)\n logger.info(f'\\tdetect {num_det_faces} faces')\n # align and warp each face\n face_helper.align_warp_face()\n\n # face restoration for each cropped face\n for idx, cropped_face in enumerate(face_helper.cropped_faces):\n # prepare data\n cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)\n normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)\n cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)\n\n try:\n with torch.no_grad():\n output = net(cropped_face_t, w=self.w, adain=True)[0]\n restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))\n del output\n torch.cuda.empty_cache()\n except Exception as error:\n logger.info(f'\\tFailed inference for CodeFormer: {error}')\n restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))\n\n restored_face = restored_face.astype('uint8')\n face_helper.add_restored_face(restored_face, cropped_face)\n\n # paste_back\n if not self.has_aligned:\n # upsample the background\n if bg_upsampler is not None:\n # Now only support RealESRGAN for upsampling background\n bg_img = bg_upsampler.enhance(img, outscale=self.upscale)[0]\n else:\n bg_img = None\n face_helper.get_inverse_affine(None)\n # paste each restored face to the input image\n if self.face_upsample and face_upsampler is not None: \n restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=self.draw_box, face_upsampler=face_upsampler)\n else:\n restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=self.draw_box)\n \n \n # save faces\n for idx, (cropped_face, restored_face) in enumerate(zip(face_helper.cropped_faces, face_helper.restored_faces)):\n # save cropped face\n if not self.has_aligned: \n save_crop_path = os.path.join(result_root, 'cropped_faces', f'{basename}_{idx:02d}.png')\n imwrite(cropped_face, save_crop_path)\n # save restored face\n if self.has_aligned:\n save_face_name = f'{basename}.png'\n else:\n save_face_name = f'{basename}_{idx:02d}.png'\n if self.suffix is not None:\n 
save_face_name = f'{save_face_name[:-4]}_{self.suffix}.png'\n save_restore_path = os.path.join(result_root, 'restored_faces', save_face_name)\n imwrite(restored_face, save_restore_path)\n \n # save restored img\n if not self.has_aligned and restored_img is not None:\n if self.suffix is not None:\n basename = f'{basename}_{self.suffix}'\n save_restore_path = os.path.join(result_root, 'final_results', f'{basename}.png')\n imwrite(restored_img, save_restore_path)\n\n\n def set_realesrgan(self):\n if torch.cuda.is_available():\n no_half_gpu_list = ['1650', '1660'] # set False for GPUs that don't support f16\n if not True in [gpu in torch.cuda.get_device_name(0) for gpu in no_half_gpu_list]:\n self.use_half = True\n model = RRDBNet(\n num_in_ch=3,\n num_out_ch=3,\n num_feat=64,\n num_block=23,\n num_grow_ch=32,\n scale=2\n )\n upsampler = RealESRGANer(\n scale=2,\n model_path=\"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth\",\n model=model,\n tile=self.bg_tile,\n tile_pad=40,\n pre_pad=0,\n half=self.use_half\n )\n if not torch.cuda.is_available():\n logger.warning('Running on CPU now! Make sure your PyTorch version matches your CUDA.')\n return upsampler" }, { "identifier": "analyse_video", "path": "src/nfsw.py", "snippet": "@lru_cache(maxsize = None)\ndef analyse_video(video_path : str) -> bool:\n\tvideo_frame_total = count_video_frame_total(video_path)\n\tfps = detect_fps(video_path)\n\tframe_range = range( 0, video_frame_total)\n\trate = 0.0\n\tcounter = 0\n\twith tqdm(total = len(frame_range), desc = 'video content analysing', unit = 'frame', ascii = ' =') as progress:\n\t\tfor frame_number in frame_range:\n\t\t\tif frame_number % int(fps) == 0:\n\t\t\t\tframe = get_video_frame(video_path, frame_number)\n\t\t\t\tif analyse_frame(frame):\n\t\t\t\t\tcounter += 1\n\t\t\trate = counter * int(fps) / len(frame_range) * 100\n\t\t\tprogress.update()\n\t\t\tprogress.set_postfix(rate = rate)\n\treturn rate > MAX_RATE" }, { "identifier": "load_model", "path": "src/third_part/whisperx/transcribe.py", "snippet": "def cli():" }, { "identifier": "load_audio", "path": "src/third_part/whisperx/audio.py", "snippet": "def load_audio(file: str, sr: int = SAMPLE_RATE):\n \"\"\"\n Open an audio file and read as mono waveform, resampling as necessary\n\n Parameters\n ----------\n file: str\n The audio file to open\n\n sr: int\n The sample rate to resample the audio if necessary\n\n Returns\n -------\n A NumPy array containing the audio waveform, in float32 dtype.\n \"\"\"\n try:\n # Launches a subprocess to decode audio while down-mixing and resampling as necessary.\n # Requires the ffmpeg CLI to be installed.\n cmd = [\n \"ffmpeg\",\n \"-nostdin\",\n \"-threads\",\n \"0\",\n \"-i\",\n file,\n \"-f\",\n \"s16le\",\n \"-ac\",\n \"1\",\n \"-acodec\",\n \"pcm_s16le\",\n \"-ar\",\n str(sr),\n \"-\",\n ]\n out = subprocess.run(cmd, capture_output=True, check=True).stdout\n except subprocess.CalledProcessError as e:\n raise RuntimeError(f\"Failed to load audio: {e.stderr.decode()}\") from e\n\n return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0" }, { "identifier": "DiarizationPipeline", "path": "src/third_part/whisperx/diarize.py", "snippet": "class DiarizationPipeline:\n def __init__(\n self,\n model_name=\"pyannote/speaker-diarization-3.0\",\n use_auth_token=None,\n device: Optional[Union[str, torch.device]] = \"cpu\",\n ):\n if isinstance(device, str):\n device = torch.device(device)\n self.model = Pipeline.from_pretrained(model_name, 
use_auth_token=use_auth_token).to(device)\n\n def __call__(self, audio: Union[str, np.ndarray], min_speakers=None, max_speakers=None):\n if isinstance(audio, str):\n audio = load_audio(audio)\n audio_data = {\n 'waveform': torch.from_numpy(audio[None, :]),\n 'sample_rate': SAMPLE_RATE\n }\n segments = self.model(audio_data, min_speakers=min_speakers, max_speakers=max_speakers)\n diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])\n diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)\n diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)\n return diarize_df" } ]
import os
import torch
import soundfile as sf
import gc; gc.collect(); torch.cuda.empty_cache(); del cloner
import gc; gc.collect(); torch.cuda.empty_cache(); del diarize_model
import gc; gc.collect(); torch.cuda.empty_cache(); del whisper
from typing import Any
from tqdm import tqdm
from src.log_helper import HandleLog
from moviepy.editor import VideoFileClip,concatenate_videoclips
from pathlib import Path
from pydub import AudioSegment
from src.audio_bgm_split import AudioProcess
from src.voice_clone import VoiceCloner
from src.temp_manager import TempFileManager
from src.translator import Translator
from src.lipsync import LipSync
from src.upscale import Upscale
from src.nfsw import analyse_video
from src.third_part.whisperx import load_model,load_audio,DiarizationPipeline
11,034
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
logger = HandleLog()

class Core:
    def __init__(self, args) -> None:
        cur_path = os.path.dirname(os.path.realpath(__file__)) # current path
        self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights') # weights_path to save model
        if not os.path.exists(self.weights_path):
            os.mkdir(self.weights_path) #
        self.input_file = args.input_file_path
        self.output_file = args.output_file_path
        self.lang_code = args.lang_code
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hf_token = args.hf_token
        self.temp_manager = TempFileManager()
        self.translotor = Translator()
        self.model_name = args.model_name
        self.xt_version_name = args.xt_version_name
        if analyse_video(args.input_file_path):
            raise("sorry! nativespeaker is not for you")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        logger.critical("[Step 1] Moviepy split voice and frames from video")
        org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav")
        org_video_clip = VideoFileClip(self.input_file)
        org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le')
        logger.info("save original voice in {}".format(org_voice_path))
        logger.critical("[Step 2] H5 Split vocal and bgm from voice")
        audio_process = AudioProcess(15)
        vocal_file, bgm_file = audio_process.split(org_voice_path)
        logger.critical("[Step 3] whisperx from speech to text")
        whispher_segments, src_lang_code, speakers_wav = self.speech_to_text(vocal_file)
        logger.critical("[Step 4] translate,text to speech,video and voice_cloned aligment")
        vocal_cloned_audio = AudioSegment.silent(0)
        bgm_audio_extend = AudioSegment.silent(0)
        video_extend_list = []
        org_vocal = AudioSegment.from_file(vocal_file)
        bgm_audio = AudioSegment.from_file(bgm_file)
        seg_len = len(whispher_segments)
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
logger = HandleLog()

class Core:
    def __init__(self, args) -> None:
        cur_path = os.path.dirname(os.path.realpath(__file__)) # current path
        self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights') # weights_path to save model
        if not os.path.exists(self.weights_path):
            os.mkdir(self.weights_path) #
        self.input_file = args.input_file_path
        self.output_file = args.output_file_path
        self.lang_code = args.lang_code
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hf_token = args.hf_token
        self.temp_manager = TempFileManager()
        self.translotor = Translator()
        self.model_name = args.model_name
        self.xt_version_name = args.xt_version_name
        if analyse_video(args.input_file_path):
            raise("sorry! nativespeaker is not for you")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        logger.critical("[Step 1] Moviepy split voice and frames from video")
        org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav")
        org_video_clip = VideoFileClip(self.input_file)
        org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le')
        logger.info("save original voice in {}".format(org_voice_path))
        logger.critical("[Step 2] H5 Split vocal and bgm from voice")
        audio_process = AudioProcess(15)
        vocal_file, bgm_file = audio_process.split(org_voice_path)
        logger.critical("[Step 3] whisperx from speech to text")
        whispher_segments, src_lang_code, speakers_wav = self.speech_to_text(vocal_file)
        logger.critical("[Step 4] translate,text to speech,video and voice_cloned aligment")
        vocal_cloned_audio = AudioSegment.silent(0)
        bgm_audio_extend = AudioSegment.silent(0)
        video_extend_list = []
        org_vocal = AudioSegment.from_file(vocal_file)
        bgm_audio = AudioSegment.from_file(bgm_file)
        seg_len = len(whispher_segments)
cloner = VoiceCloner(self.xt_version_name)
2
2023-12-01 12:23:19+00:00
16k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": 
"utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation, transform):\n def __init__(self, sh_degree : int, smpl_type : str, motion_offset_flag : bool, actor_gender: str):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1, transform=None):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def kl_densify_and_clone(self, grads, grad_threshold, scene_extent, kl_threshold=0.4):\n def kl_densify_and_split(self, grads, grad_threshold, scene_extent, kl_threshold=0.4, N=2):\n def kl_merge(self, grads, grad_threshold, scene_extent, kl_threshold=0.1):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, kl_threshold=0.4, t_vertices=None, iter=None):\n def kl_div(self, mu_0, rotation_0_q, scaling_0_diag, mu_1, rotation_1_q, scaling_1_diag):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def coarse_deform_c2source(self, query_pts, params, t_params, t_vertices, lbs_weights=None, correct_Rs=None, return_transl=False):\ndef read_pickle(pkl_path):\ndef SMPL_to_tensor(params, device):\ndef batch_rodrigues_torch(poses):\ndef get_rigid_transformation_torch(rot_mats, joints, parents):\ndef get_transform_params_torch(smpl, params, rot_mats=None, correct_Rs=None):\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n L_0 = rotation_0 @ scaling_0\n A = torch.matmul(bweights, A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n A = torch.matmul(bweights, self.s_A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n K = K.reshape([batch_size, 3, 3])\n A = 
get_rigid_transformation_torch(rot_mats, joints, parents)\n R = params['R'] \n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))" }, { "identifier": "SMPL", "path": "smpl/smpl_numpy.py", "snippet": "class SMPL():\n def __init__(self, sex, model_dir):\n super(SMPL, self).__init__()\n\n model_paths = {\n 'male': os.path.join(model_dir, MALE_PATH),\n 'female': os.path.join(model_dir, FEMALE_PATH),\n # 'neutral': os.path.join(model_dir, NEUTRAL_PATH)\n 'neutral': os.path.join('assets/SMPL_NEUTRAL.pkl')\n }\n\n with open(model_paths[sex], 'rb') as f:\n smpl_model = pickle.load(f, encoding='latin1')\n self.J_regressor = np.array(smpl_model['J_regressor'].todense()) # (24, 6890)\n self.weights = smpl_model['weights'] # (6890, 24)\n self.posedirs = smpl_model['posedirs'] # (6890, 3, 207)\n self.v_template = smpl_model['v_template'] # (6890, 3)\n self.shapedirs = np.array(smpl_model['shapedirs']) # (6890, 3, 10)\n self.faces = smpl_model['f'].astype('int32') # (13776, 3)\n self.kintree_table = smpl_model['kintree_table'].astype('int64') # (2, 24)\n\n id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}\n self.parent = np.array([id_to_col[self.kintree_table[0, it]] for it in range(1, self.kintree_table.shape[1])])\n\n self.pose_shape = [24, 3]\n self.beta_shape = [10]\n self.pose = np.zeros(self.pose_shape)\n self.beta = np.zeros(self.beta_shape)\n\n self.verts = None\n self.J = None\n self.R = None\n\n def __call__(self, pose, beta):\n\n v_template = self.v_template # (6890, 3)\n shapedirs = self.shapedirs.reshape(-1,10) # (6890*3, 10)\n beta = beta[:, None] # (10, 1)\n\n v_shaped = shapedirs.dot(beta).reshape(6890, 3) + v_template # (6890, 3)\n J = self.J_regressor.dot(v_shaped) # (24, 3)\n\n # input is a rotation matrix: (24,3,3)\n if pose.shape == (24, 3, 3):\n R = pose\n # input is a rotation axis-angle vector: (1, 72), (72, 1) or (72, )\n elif pose.shape == (1, 72) or pose.shape == (72, 1) or pose.shape == (72,):\n pose_vectors = pose.reshape(-1, 3) # (24, 3)\n R = np.array([rodrigues(pose_vectors[p_idx])[0] \n for p_idx in range(pose_vectors.shape[0])\n ], \n dtype='float32') # (24, 3, 3)\n else:\n raise ValueError(\"Unsupported Pose Inputs - the Pose Shape is {}\".format(pose.shape))\n\n Is = np.eye(3, dtype='float32')[None, :] # (1, 3, 3)\n lrotmin = (R[1:,:] - Is).reshape(-1, 1) # (23x3x3, 1)\n posedirs = self.posedirs.reshape(-1,207) # (6890x3, 207)\n v_posed = v_shaped + posedirs.dot(lrotmin).reshape(6890, 3) # (6890, 3)\n\n J_ = J.copy()\n J_[1:, :] = J[1:, :] - J[self.parent, :] # (24, 3)\n G_ = np.concatenate([R, J_[:, :, None]], axis=-1) # (24, 3, 4)\n pad_rows = np.array([[0, 0, 0, 1]], dtype='float32')\n pad_rows = np.repeat(pad_rows, 24, axis=0).reshape(-1, 1, 4)\n G_ = np.concatenate([G_, pad_rows], axis=1) # (24, 4, 4)\n\n G = [G_[0].copy()]\n for i in range(1, 24):\n G.append(G[self.parent[i-1]].dot(G_[i, :, :]))\n G = np.stack(G, axis=0) # (24, 4, 4)\n\n joints = G[:, :3, 3]\n rest_joints = np.concatenate([J, np.zeros((24, 1))], axis=-1)[:, :, None] # (24, 4, 1)\n zeros = np.zeros((24, 4, 3), dtype='float32') # (24, 4, 3)\n rest_joints_mtx = np.concatenate([zeros, rest_joints], axis=-1) # (24, 4, 4) \n # print(\"G1: \", G[0], \"rest_joints_mtx1: \", rest_joints_mtx[0])\n posed_joints_mtx = np.matmul(G, rest_joints_mtx)\n # print(\"rest_joints_mtx2: \", posed_joints_mtx[0])\n G = G - posed_joints_mtx\n # print(G[0]) \n 
rest_shape_h = np.concatenate([v_posed, np.ones(v_posed.shape[0])[:, None]], axis=-1) #(6890, 4)\n T = self.weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)\n v = np.matmul(T, rest_shape_h[:, :, None])[:, :3, 0]\n \n return v, joints" }, { "identifier": "SMPLX", "path": "smplx/body_models.py", "snippet": "class SMPLX(SMPLH):\n '''\n SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters\n trained jointly for the face, hands and body.\n SMPL-X uses standard vertex based linear blend skinning with learned\n corrective blend shapes, has N=10475 vertices and K=54 joints,\n which includes joints for the neck, jaw, eyeballs and fingers.\n '''\n\n NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS\n NUM_HAND_JOINTS = 15\n NUM_FACE_JOINTS = 3\n NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS\n EXPRESSION_SPACE_DIM = 100\n NECK_IDX = 12\n\n def __init__(\n self, model_path: str,\n kid_template_path: str = '',\n num_expression_coeffs: int = 10,\n create_expression: bool = True,\n expression: Optional[Tensor] = None,\n create_jaw_pose: bool = True,\n jaw_pose: Optional[Tensor] = None,\n create_leye_pose: bool = True,\n leye_pose: Optional[Tensor] = None,\n create_reye_pose=True,\n reye_pose: Optional[Tensor] = None,\n use_face_contour: bool = False,\n batch_size: int = 1,\n gender: str = 'neutral',\n age: str = 'adult',\n dtype=torch.float32,\n ext: str = 'npz',\n **kwargs\n ) -> None:\n ''' SMPLX model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n num_expression_coeffs: int, optional\n Number of expression components to use\n (default = 10).\n create_expression: bool, optional\n Flag for creating a member variable for the expression space\n (default = True).\n expression: torch.tensor, optional, Bx10\n The default value for the expression member variable.\n (default = None)\n create_jaw_pose: bool, optional\n Flag for creating a member variable for the jaw pose.\n (default = False)\n jaw_pose: torch.tensor, optional, Bx3\n The default value for the jaw pose variable.\n (default = None)\n create_leye_pose: bool, optional\n Flag for creating a member variable for the left eye pose.\n (default = False)\n leye_pose: torch.tensor, optional, Bx10\n The default value for the left eye pose variable.\n (default = None)\n create_reye_pose: bool, optional\n Flag for creating a member variable for the right eye pose.\n (default = False)\n reye_pose: torch.tensor, optional, Bx10\n The default value for the right eye pose variable.\n (default = None)\n use_face_contour: bool, optional\n Whether to compute the keypoints that form the facial contour\n batch_size: int, optional\n The batch size used for creating the member variables\n gender: str, optional\n Which gender to load\n dtype: torch.dtype\n The data type for the created variables\n '''\n\n # Load the model\n if osp.isdir(model_path):\n model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)\n smplx_path = os.path.join(model_path, model_fn)\n else:\n smplx_path = model_path\n assert osp.exists(smplx_path), 'Path {} does not exist!'.format(\n smplx_path)\n\n if ext == 'pkl':\n with open(smplx_path, 'rb') as smplx_file:\n model_data = pickle.load(smplx_file, encoding='latin1')\n elif ext == 'npz':\n model_data = np.load(smplx_path, allow_pickle=True)\n else:\n raise ValueError('Unknown extension: {}'.format(ext))\n\n data_struct = Struct(**model_data)\n\n super(SMPLX, self).__init__(\n model_path=model_path,\n 
kid_template_path=kid_template_path,\n data_struct=data_struct,\n dtype=dtype,\n batch_size=batch_size,\n vertex_ids=VERTEX_IDS['smplx'],\n gender=gender, age=age, ext=ext,\n **kwargs)\n\n lmk_faces_idx = data_struct.lmk_faces_idx\n self.register_buffer('lmk_faces_idx',\n torch.tensor(lmk_faces_idx, dtype=torch.long))\n lmk_bary_coords = data_struct.lmk_bary_coords\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_bary_coords, dtype=dtype))\n\n self.use_face_contour = use_face_contour\n if self.use_face_contour:\n dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx\n dynamic_lmk_faces_idx = torch.tensor(\n dynamic_lmk_faces_idx,\n dtype=torch.long)\n self.register_buffer('dynamic_lmk_faces_idx',\n dynamic_lmk_faces_idx)\n\n dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords\n dynamic_lmk_bary_coords = torch.tensor(\n dynamic_lmk_bary_coords, dtype=dtype)\n self.register_buffer('dynamic_lmk_bary_coords',\n dynamic_lmk_bary_coords)\n\n neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)\n self.register_buffer(\n 'neck_kin_chain',\n torch.tensor(neck_kin_chain, dtype=torch.long))\n\n if create_jaw_pose:\n if jaw_pose is None:\n default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)\n jaw_pose_param = nn.Parameter(default_jaw_pose,\n requires_grad=True)\n self.register_parameter('jaw_pose', jaw_pose_param)\n\n if create_leye_pose:\n if leye_pose is None:\n default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_leye_pose = torch.tensor(leye_pose, dtype=dtype)\n leye_pose_param = nn.Parameter(default_leye_pose,\n requires_grad=True)\n self.register_parameter('leye_pose', leye_pose_param)\n\n if create_reye_pose:\n if reye_pose is None:\n default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_reye_pose = torch.tensor(reye_pose, dtype=dtype)\n reye_pose_param = nn.Parameter(default_reye_pose,\n requires_grad=True)\n self.register_parameter('reye_pose', reye_pose_param)\n\n shapedirs = data_struct.shapedirs\n if len(shapedirs.shape) < 3:\n shapedirs = shapedirs[:, :, None]\n if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +\n self.EXPRESSION_SPACE_DIM):\n print(f'WARNING: You are using a {self.name()} model, with only'\n ' 10 shape and 10 expression coefficients.')\n expr_start_idx = 10\n expr_end_idx = 20\n num_expression_coeffs = min(num_expression_coeffs, 10)\n else:\n expr_start_idx = self.SHAPE_SPACE_DIM\n expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs\n num_expression_coeffs = min(\n num_expression_coeffs, self.EXPRESSION_SPACE_DIM)\n\n self._num_expression_coeffs = num_expression_coeffs\n\n expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]\n self.register_buffer(\n 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))\n\n if create_expression:\n if expression is None:\n default_expression = torch.zeros(\n [batch_size, self.num_expression_coeffs], dtype=dtype)\n else:\n default_expression = torch.tensor(expression, dtype=dtype)\n expression_param = nn.Parameter(default_expression,\n requires_grad=True)\n self.register_parameter('expression', expression_param)\n\n def name(self) -> str:\n return 'SMPL-X'\n\n @property\n def num_expression_coeffs(self):\n return self._num_expression_coeffs\n\n def create_mean_pose(self, data_struct, flat_hand_mean=False):\n # Create the array for the mean pose. 
If flat_hand is false, then use\n # the mean that is given by the data, rather than the flat open hand\n global_orient_mean = torch.zeros([3], dtype=self.dtype)\n body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],\n dtype=self.dtype)\n jaw_pose_mean = torch.zeros([3], dtype=self.dtype)\n leye_pose_mean = torch.zeros([3], dtype=self.dtype)\n reye_pose_mean = torch.zeros([3], dtype=self.dtype)\n # pose_mean = np.concatenate([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], axis=0)\n pose_mean = torch.cat([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], 0)\n\n return pose_mean\n\n def extra_repr(self):\n msg = super(SMPLX, self).extra_repr()\n msg = [\n msg,\n f'Number of Expression Coefficients: {self.num_expression_coeffs}'\n ]\n return '\\n'.join(msg)\n\n def forward(\n self,\n betas: Optional[Tensor] = None,\n global_orient: Optional[Tensor] = None,\n body_pose: Optional[Tensor] = None,\n left_hand_pose: Optional[Tensor] = None,\n right_hand_pose: Optional[Tensor] = None,\n transl: Optional[Tensor] = None,\n expression: Optional[Tensor] = None,\n jaw_pose: Optional[Tensor] = None,\n leye_pose: Optional[Tensor] = None,\n reye_pose: Optional[Tensor] = None,\n return_verts: bool = True,\n return_full_pose: bool = False,\n pose2rot: bool = True,\n return_shaped: bool = True,\n **kwargs\n ) -> TensorOutput:\n '''\n Forward pass for the SMPLX model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape BxN_b\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n expression: torch.tensor, optional, shape BxN_e\n If given, ignore the member variable `expression` and use it\n instead. For example, it can used if expression parameters\n `expression` are predicted from some external model.\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n left_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `left_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n right_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `right_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n jaw_pose: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `jaw_pose` and\n use this instead. It should either joint rotations in\n axis-angle format.\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n output: ModelOutput\n A named tuple of type `ModelOutput`\n '''\n\n # If no shape and pose parameters are passed along, then use the\n # ones from the module\n global_orient = (global_orient if global_orient is not None else\n self.global_orient)\n body_pose = body_pose if body_pose is not None else self.body_pose\n betas = betas if betas is not None else self.betas\n\n left_hand_pose = (left_hand_pose if left_hand_pose is not None else\n self.left_hand_pose)\n right_hand_pose = (right_hand_pose if right_hand_pose is not None else\n self.right_hand_pose)\n jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose\n leye_pose = leye_pose if leye_pose is not None else self.leye_pose\n reye_pose = reye_pose if reye_pose is not None else self.reye_pose\n expression = expression if expression is not None else self.expression\n\n apply_trans = transl is not None or hasattr(self, 'transl')\n if transl is None:\n if hasattr(self, 'transl'):\n transl = self.transl\n\n if self.use_pca:\n left_hand_pose = torch.einsum(\n 'bi,ij->bj', [left_hand_pose, self.left_hand_components])\n right_hand_pose = torch.einsum(\n 'bi,ij->bj', [right_hand_pose, self.right_hand_components])\n\n full_pose = torch.cat([global_orient.reshape(-1, 1, 3),\n body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),\n jaw_pose.reshape(-1, 1, 3),\n leye_pose.reshape(-1, 1, 3),\n reye_pose.reshape(-1, 1, 3),\n left_hand_pose.reshape(-1, 15, 3),\n right_hand_pose.reshape(-1, 15, 3)],\n dim=1).reshape(-1, 165).to(self.pose_mean.device)\n\n # Add the mean pose of the model. Does not affect the body, only the\n # hands when flat_hand_mean == False\n full_pose += self.pose_mean\n\n batch_size = max(betas.shape[0], global_orient.shape[0],\n body_pose.shape[0])\n # Concatenate the shape and expression coefficients\n scale = int(batch_size / betas.shape[0])\n if scale > 1:\n betas = betas.expand(scale, -1)\n shape_components = torch.cat([betas, expression], dim=-1).to(self.pose_mean.device)\n\n shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)\n\n vertices, joints, A, T = lbs(shape_components, full_pose, self.v_template,\n shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, pose2rot=pose2rot,\n )\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(\n dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(\n self.batch_size, 1, 1)\n if self.use_face_contour:\n lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(\n vertices, full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain,\n pose2rot=True,\n )\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords\n\n lmk_faces_idx = torch.cat([lmk_faces_idx,\n dyn_lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat(\n [lmk_bary_coords.expand(batch_size, -1, -1),\n dyn_lmk_bary_coords], 1)\n\n landmarks = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xs = joints[0,:,0]\n # ys = joints[0,:,1]\n # plt.scatter(xs, ys)\n\n # # zip joins x and y coordinates in pairs\n # count = 0\n # for x,y in zip(xs, ys):\n\n # label = \"{:.2f}\".format(count)\n\n # plt.annotate(label, # this is the text\n # (x,y), # these are the coordinates to position the label\n # textcoords=\"offset points\", # how to position the text\n # xytext=(0,10), # distance 
from text to points (x,y)\n # ha='center') # horizontal alignment can be left, right or center\n # count += 1\n # plt.savefig(\"joints.png\")\n # import pdb; pdb.set_trace()\n\n # Add any extra joints that might be needed\n joints = self.vertex_joint_selector(vertices, joints)\n # Add the landmarks to the joints\n joints = torch.cat([joints, landmarks], dim=1)\n # Map the joints to the current dataset\n\n if self.joint_mapper is not None:\n joints = self.joint_mapper(joints=joints, vertices=vertices)\n\n if apply_trans:\n joints += transl.unsqueeze(dim=1)\n vertices += transl.unsqueeze(dim=1)\n # clone because we are modifying them in-place\n A = A.clone()\n A[..., :3, 3] += transl.unsqueeze(dim=1)\n T = T.clone()\n T[..., :3, 3] += transl.unsqueeze(dim=1)\n\n v_shaped = None\n if return_shaped:\n v_shaped = self.v_template + blend_shapes(betas, self.shapedirs)\n else:\n v_shaped = Tensor(0)\n\n output = TensorOutput(vertices=vertices if return_verts else None,\n joints=joints,\n betas=betas,\n expression=expression,\n global_orient=global_orient,\n body_pose=body_pose,\n left_hand_pose=left_hand_pose,\n right_hand_pose=right_hand_pose,\n jaw_pose=jaw_pose,\n v_shaped=v_shaped,\n full_pose=full_pose if return_full_pose else None,\n A=A,\n T=T,\n f=self.faces)\n return output" }, { "identifier": "SMCReader", "path": "data/dna_rendering/dna_rendering_sample_code/SMCReader.py", "snippet": "class SMCReader:\n\n def __init__(self, file_path):\n \"\"\"Read SenseMocapFile endswith \".smc\".\n\n Args:\n file_path (str):\n Path to an SMC file.\n body_model (nn.Module or dict):\n Only needed for SMPL transformation to device frame\n if nn.Module: a body_model instance\n if dict: a body_model config\n \"\"\"\n self.smc = h5py.File(file_path, 'r')\n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None \n self.__available_keys__ = list(self.smc.keys())\n \n self.actor_info = None \n if hasattr(self.smc, 'attrs') and len(self.smc.attrs.keys()) > 0:\n self.actor_info = dict(\n id=self.smc.attrs['actor_id'],\n perf_id=self.smc.attrs['performance_id'],\n age=self.smc.attrs['age'],\n gender=self.smc.attrs['gender'],\n height=self.smc.attrs['height'],\n weight=self.smc.attrs['weight'],\n ethnicity=self.smc.attrs['ethnicity'],\n )\n\n self.Camera_5mp_info = None \n if 'Camera_5mp' in self.smc:\n self.Camera_5mp_info = dict(\n num_device=self.smc['Camera_5mp'].attrs['num_device'],\n num_frame=self.smc['Camera_5mp'].attrs['num_frame'],\n resolution=self.smc['Camera_5mp'].attrs['resolution'],\n )\n self.Camera_12mp_info = None \n if 'Camera_12mp' in self.smc:\n self.Camera_12mp_info = dict(\n num_device=self.smc['Camera_12mp'].attrs['num_device'],\n num_frame=self.smc['Camera_12mp'].attrs['num_frame'],\n resolution=self.smc['Camera_12mp'].attrs['resolution'],\n )\n self.Kinect_info = None\n if 'Kinect' in self.smc:\n self.Kinect_info=dict(\n num_device=self.smc['Kinect'].attrs['num_device'],\n num_frame=self.smc['Kinect'].attrs['num_frame'],\n resolution=self.smc['Kinect'].attrs['resolution'],\n )\n\n def get_available_keys(self):\n return self.__available_keys__ \n\n def get_actor_info(self):\n return self.actor_info\n \n def get_Camera_12mp_info(self):\n return self.Camera_12mp_info\n\n def get_Camera_5mp_info(self):\n return self.Camera_5mp_info\n \n def get_Kinect_info(self):\n return self.Kinect_info\n \n ### RGB Camera Calibration\n def get_Calibration_all(self):\n \"\"\"Get calibration matrix of all cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration 
matrixs of all matrixs.\n dict( \n Camera_Parameter: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_id(str) in {'Camera_5mp': '0'~'47', 'Camera_12mp':'48'~'60'}\n Matrix_type in ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\" \n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n if self.__calibration_dict__ is not None:\n return self.__calibration_dict__\n\n self.__calibration_dict__ = dict()\n for ci in self.smc['Camera_Parameter'].keys():\n self.__calibration_dict__.setdefault(ci,dict())\n for mt in ['D', 'K', 'RT', 'Color_Calibration'] :\n self.__calibration_dict__[ci][mt] = \\\n self.smc['Camera_Parameter'][ci][mt][()]\n return self.__calibration_dict__\n\n def get_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain camera by its type and id \n\n Args:\n Camera_id (int/str of a number):\n Camera_id(str) in {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\"\n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n rs = dict()\n for k in ['D', 'K', 'RT', 'Color_Calibration'] :\n rs[k] = self.smc['Camera_Parameter'][f'{int(Camera_id):02d}'][k][()]\n return rs\n\n ### Kinect Camera Calibration\n def get_Kinect_Calibration_all(self):\n \"\"\"Get calibration matrix of all kinect cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration matrixs of all matrixs.\n dict( \n Camera_group: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_group(str) in ['Kinect']\n Camera_id(str) in {'Kinect': '0'~'7'}\n Matrix_type in ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n if self.__kinect_calib_dict__ is not None:\n return self.__kinect_calib_dict__\n\n self.__kinect_calib_dict__ = dict()\n for cg in ['Kinect']:\n self.__kinect_calib_dict__.setdefault(cg,dict())\n for ci in self.smc['Calibration'][cg].keys():\n self.__kinect_calib_dict__[cg].setdefault(ci,dict())\n for mt in ['D', 'K', 'RT'] :\n self.__kinect_calib_dict__[cg][ci][mt] = \\\n self.smc['Calibration'][cg][ci][mt][()]\n return self.__kinect_calib_dict__\n\n def get_kinect_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain kinect camera by its type and id \n\n Args:\n Camera_group (str):\n Camera_group in ['Kinect'].\n Camera_id (int/str of a number):\n CameraID(str) in {'Kinect': '0'~'7'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(Camera_id in self.smc['Calibration'][\"Kinect\"].keys())\n rs = dict()\n for k in ['D', 'K', 'RT']:\n rs[k] = self.smc['Calibration'][\"Kinect\"][Camera_id][k][()]\n return rs\n\n ### RGB image\n def __read_color_from_bytes__(self, color_array):\n \"\"\"Decode an RGB image from an encoded byte array.\"\"\"\n return cv2.imdecode(color_array, cv2.IMREAD_COLOR)\n\n def get_mask(self, Camera_id, Frame_id=None, disable_tqdm=True):\n \"\"\"Get mask from Camera_id, Frame_id\n\n Args:\n Camera_id (int/str of a number):\n Camera_id (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'\n b.list of 
numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Mask' in self.smc:\n print(\"=== no key: Mask.\\nplease check available keys!\")\n return None \n\n Camera_id = str(Camera_id)\n\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc['Mask'][Camera_id]['mask'].keys())\n img_byte = self.smc['Mask'][Camera_id]['mask'][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc['Mask'][Camera_id]['mask'].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_mask(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n def get_img(self, Camera_group, Camera_id, Image_type, Frame_id=None, disable_tqdm=True):\n \"\"\"Get image its Camera_group, Camera_id, Image_type and Frame_id\n\n Args:\n Camera_group (str):\n Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'].\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Image_type(str) in \n {'Camera_5mp': ['color'], \n 'Camera_12mp': ['color'],\n 'Kinect': ['depth', 'mask']}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not Camera_group in self.smc:\n print(\"=== no key: %s.\\nplease check available keys!\" % Camera_group)\n return None\n\n assert(Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'])\n Camera_id = str(Camera_id)\n assert(Camera_id in self.smc[Camera_group].keys())\n assert(Image_type in self.smc[Camera_group][Camera_id].keys())\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc[Camera_group][Camera_id][Image_type].keys())\n if Image_type in ['color']:\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n if Image_type == 'mask':\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n if Image_type == 'depth':\n img_color = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc[Camera_group][Camera_id][Image_type].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_img(Camera_group, Camera_id, Image_type,fi))\n return np.stack(rs,axis=0)\n \n ###Keypoints2d\n def get_Keypoints2d(self, Camera_id, Frame_id=None):\n \"\"\"Get keypoint2D by its Camera_group, Camera_id and Frame_id\n\n Args:\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n 
a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Keypoints_2D' in self.smc:\n print(\"=== no key: Keypoints_2D.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_2D'][Camera_id][()][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_2D'][Camera_id][()]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints2d(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n ###Keypoints3d\n def get_Keypoints3d(self, Frame_id=None):\n \"\"\"Get keypoint3D Frame_id, TODO coordinate\n\n Args:\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n Keypoints3d tensor: np.ndarray of shape ([N], ,3)\n \"\"\" \n if not 'Keypoints_3D' in self.smc:\n print(\"=== no key: Keypoints_3D.\\nplease check available keys!\")\n return None \n\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_3D'][\"keypoints3d\"][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_3D'][\"keypoints3d\"]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints3d(fi))\n return np.stack(rs,axis=0)\n\n ###SMPLx\n def get_SMPLx(self, Frame_id=None):\n \"\"\"Get SMPL (world coordinate) computed by mocap processing pipeline.\n\n Args:\n Frame_id (int, list or None, optional):\n int: frame id of one selected frame\n list: a list of frame id\n None: all frames will be returned\n Defaults to None.\n\n Returns:\n dict:\n 'global_orient': np.ndarray of shape (N, 3)\n 'body_pose': np.ndarray of shape (N, 21, 3)\n 'transl': np.ndarray of shape (N, 3)\n 'betas': np.ndarray of shape (1, 10)\n \"\"\"\n if not 'SMPLx' in self.smc:\n print(\"=== no key: SMPLx.\\nplease check available keys!\")\n return None \n\n t_frame = self.smc['SMPLx']['betas'][()].shape[0]\n if Frame_id is None:\n frame_list = range(t_frame)\n elif isinstance(Frame_id, list):\n frame_list = [int(fi) for fi in Frame_id]\n elif isinstance(Frame_id, (int,str)):\n Frame_id = int(Frame_id)\n assert Frame_id < t_frame,\\\n f'Invalid frame_index {Frame_id}'\n frame_list = Frame_id\n else:\n raise TypeError('frame_id should be int, list or None.')\n\n smpl_dict = {}\n for key in ['betas', 'expression', 'fullpose', 'transl']:\n smpl_dict[key] = self.smc['SMPLx'][key][()][frame_list, ...]\n smpl_dict['scale'] = self.smc['SMPLx']['scale'][()]\n\n return smpl_dict\n\n def release(self):\n self.smc = None \n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None\n self.__available_keys__ = None\n self.actor_info = None \n self.Camera_5mp_info = None\n self.Camera_12mp_info = None \n self.Kinect_info = None" } ]
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14191
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info:
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
7
2023-11-29 07:10:39+00:00
16k
emdgroup/baybe
tests/simulate_telemetry.py
[ { "identifier": "Campaign", "path": "baybe/campaign.py", "snippet": "class Campaign(SerialMixin):\n \"\"\"Main class for interaction with BayBE.\n\n Campaigns define and record an experimentation process, i.e. the execution of a\n series of measurements and the iterative sequence of events involved.\n\n In particular, a campaign:\n * Defines the objective of an experimentation process.\n * Defines the search space over which the experimental parameter may vary.\n * Defines a strategy for traversing the search space.\n * Records the measurement data collected during the process.\n * Records metadata about the progress of the experimentation process.\n \"\"\"\n\n # DOE specifications\n searchspace: SearchSpace = field()\n \"\"\"The search space in which the experiments are conducted.\"\"\"\n\n objective: Objective = field()\n \"\"\"The optimization objective.\"\"\"\n\n strategy: Strategy = field(factory=TwoPhaseStrategy)\n \"\"\"The employed strategy\"\"\"\n\n # Data\n measurements_exp: pd.DataFrame = field(factory=pd.DataFrame, eq=eq_dataframe)\n \"\"\"The experimental representation of the conducted experiments.\"\"\"\n\n numerical_measurements_must_be_within_tolerance: bool = field(default=True)\n \"\"\"Flag for forcing numerical measurements to be within tolerance.\"\"\"\n\n # Metadata\n n_batches_done: int = field(default=0)\n \"\"\"The number of already processed batches.\"\"\"\n\n n_fits_done: int = field(default=0)\n \"\"\"The number of fits already done.\"\"\"\n\n # Private\n _cached_recommendation: pd.DataFrame = field(factory=pd.DataFrame, eq=eq_dataframe)\n \"\"\"The cached recommendations.\"\"\"\n\n @property\n def parameters(self) -> List[Parameter]:\n \"\"\"The parameters of the underlying search space.\"\"\"\n return self.searchspace.parameters\n\n @property\n def targets(self) -> List[NumericalTarget]:\n \"\"\"The targets of the underlying objective.\"\"\"\n # TODO: Currently, the `Objective` class is directly coupled to\n # `NumericalTarget`, hence the return type.\n return self.objective.targets\n\n @property\n def measurements_parameters_comp(self) -> pd.DataFrame:\n \"\"\"The computational representation of the measured parameters.\"\"\"\n if len(self.measurements_exp) < 1:\n return pd.DataFrame()\n return self.searchspace.transform(self.measurements_exp)\n\n @property\n def measurements_targets_comp(self) -> pd.DataFrame:\n \"\"\"The computational representation of the measured targets.\"\"\"\n if len(self.measurements_exp) < 1:\n return pd.DataFrame()\n return self.objective.transform(self.measurements_exp)\n\n @classmethod\n def from_config(cls, config_json: str) -> Campaign:\n \"\"\"Create a campaign from a configuration JSON.\n\n Args:\n config_json: The string with the configuration JSON.\n\n Returns:\n The constructed campaign.\n \"\"\"\n config = json.loads(config_json)\n config[\"searchspace\"] = {\n \"parameters\": config.pop(\"parameters\"),\n \"constraints\": config.pop(\"constraints\", None),\n }\n return _config_converter.structure(config, Campaign)\n\n @classmethod\n def to_config(cls) -> str:\n \"\"\"Extract the configuration of the campaign as JSON string.\n\n Note: This is not yet implemented. 
Use\n :func:`baybe.utils.serialization.SerialMixin.to_json` instead\n\n Returns:\n The configuration as JSON string.\n\n Raises:\n NotImplementedError: When trying to use this function.\n \"\"\"\n # TODO: Ideally, this should extract a \"minimal\" configuration, that is,\n # default values should not be exported, which cattrs supports via the\n # 'omit_if_default' option. Can be Implemented once the converter structure\n # has been cleaned up.\n raise NotImplementedError()\n\n @classmethod\n def validate_config(cls, config_json: str) -> None:\n \"\"\"Validate a given campaign configuration JSON.\n\n Args:\n config_json: The JSON that should be validated.\n \"\"\"\n config = json.loads(config_json)\n config[\"searchspace\"] = {\n \"parameters\": config.pop(\"parameters\"),\n \"constraints\": config.pop(\"constraints\", None),\n }\n _validation_converter.structure(config, Campaign)\n\n def add_measurements(self, data: pd.DataFrame) -> None:\n \"\"\"Add results from a dataframe to the internal database.\n\n Each addition of data is considered a new batch. Added results are checked for\n validity. Categorical values need to have an exact match. For numerical values,\n a campaign flag determines if values that lie outside a specified tolerance\n are accepted.\n Note that this modifies the provided data in-place.\n\n Args:\n data: The data to be added (with filled values for targets). Preferably\n created via :func:`baybe.campaign.Campaign.recommend`.\n\n Raises:\n ValueError: If one of the targets has missing values or NaNs in the provided\n dataframe.\n TypeError: If the target has non-numeric entries in the provided dataframe.\n \"\"\"\n # Invalidate recommendation cache first (in case of uncaught exceptions below)\n self._cached_recommendation = pd.DataFrame()\n\n # Check if all targets have valid values\n for target in self.targets:\n if data[target.name].isna().any():\n raise ValueError(\n f\"The target '{target.name}' has missing values or NaNs in the \"\n f\"provided dataframe. Missing target values are not supported.\"\n )\n if data[target.name].dtype.kind not in \"iufb\":\n raise TypeError(\n f\"The target '{target.name}' has non-numeric entries in the \"\n f\"provided dataframe. Non-numeric target values are not supported.\"\n )\n\n # Check if all targets have valid values\n for param in self.parameters:\n if data[param.name].isna().any():\n raise ValueError(\n f\"The parameter '{param.name}' has missing values or NaNs in the \"\n f\"provided dataframe. 
Missing parameter values are not supported.\"\n )\n if param.is_numeric and (data[param.name].dtype.kind not in \"iufb\"):\n raise TypeError(\n f\"The numerical parameter '{param.name}' has non-numeric entries in\"\n f\" the provided dataframe.\"\n )\n\n # Update meta data\n # TODO: refactor responsibilities\n self.searchspace.discrete.mark_as_measured(\n data, self.numerical_measurements_must_be_within_tolerance\n )\n\n # Read in measurements and add them to the database\n self.n_batches_done += 1\n to_insert = data.copy()\n to_insert[\"BatchNr\"] = self.n_batches_done\n to_insert[\"FitNr\"] = np.nan\n\n self.measurements_exp = pd.concat(\n [self.measurements_exp, to_insert], axis=0, ignore_index=True\n )\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_ADD_RESULTS\"], 1)\n telemetry_record_recommended_measurement_percentage(\n self._cached_recommendation,\n data,\n self.parameters,\n self.numerical_measurements_must_be_within_tolerance,\n )\n\n def recommend(self, batch_quantity: int = 5) -> pd.DataFrame:\n \"\"\"Provide the recommendations for the next batch of experiments.\n\n Args:\n batch_quantity: Number of requested recommendations.\n\n Returns:\n Dataframe containing the recommendations in experimental representation.\n\n Raises:\n ValueError: If ``batch_quantity`` is smaller than 1.\n \"\"\"\n if batch_quantity < 1:\n raise ValueError(\n f\"You must at least request one recommendation per batch, but provided \"\n f\"{batch_quantity=}.\"\n )\n\n # If there are cached recommendations and the batch size of those is equal to\n # the previously requested one, we just return those\n if len(self._cached_recommendation) == batch_quantity:\n return self._cached_recommendation\n\n # Update recommendation meta data\n if len(self.measurements_exp) > 0:\n self.n_fits_done += 1\n self.measurements_exp[\"FitNr\"].fillna(self.n_fits_done, inplace=True)\n\n # Get the recommended search space entries\n rec = self.strategy.recommend(\n self.searchspace,\n batch_quantity,\n self.measurements_parameters_comp,\n self.measurements_targets_comp,\n )\n\n # Cache the recommendations\n self._cached_recommendation = rec.copy()\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_RECOMMEND\"], 1)\n telemetry_record_value(TELEM_LABELS[\"BATCH_QUANTITY\"], batch_quantity)\n\n return rec" }, { "identifier": "Objective", "path": "baybe/objective.py", "snippet": "class Objective(SerialMixin):\n \"\"\"Class for managing optimization objectives.\"\"\"\n\n # TODO: The class currently directly depends on `NumericalTarget`. Once this\n # direct dependence is replaced with a dependence on `Target`, the type\n # annotations should be changed.\n\n mode: Literal[\"SINGLE\", \"DESIRABILITY\"] = field()\n \"\"\"The optimization mode.\"\"\"\n\n targets: List[Target] = field(validator=min_len(1))\n \"\"\"The list of targets used for the objective.\"\"\"\n\n weights: List[float] = field(converter=_normalize_weights)\n \"\"\"The weights used to balance the different targets. 
By default, all\n weights are equally important.\"\"\"\n\n combine_func: Literal[\"MEAN\", \"GEOM_MEAN\"] = field(\n default=\"GEOM_MEAN\", validator=in_([\"MEAN\", \"GEOM_MEAN\"])\n )\n \"\"\"The function used to combine the different targets.\"\"\"\n\n @weights.default\n def _default_weights(self) -> List[float]:\n \"\"\"Create the default weights.\"\"\"\n # By default, all targets are equally important.\n return [1.0] * len(self.targets)\n\n @targets.validator\n def _validate_targets( # noqa: DOC101, DOC103\n self, _: Any, targets: List[NumericalTarget]\n ) -> None:\n \"\"\"Validate targets depending on the objective mode.\n\n Raises:\n ValueError: If multiple targets are specified when using objective mode\n ``SINGLE``.\n \"\"\"\n # Raises a ValueError if multiple targets are specified when using objective\n # mode SINGLE.\n if (self.mode == \"SINGLE\") and (len(targets) != 1):\n raise ValueError(\n \"For objective mode 'SINGLE', exactly one target must be specified.\"\n )\n # Raises a ValueError if there are unbounded targets when using objective mode\n # DESIRABILITY.\n if self.mode == \"DESIRABILITY\":\n if any(not target.bounds.is_bounded for target in targets):\n raise ValueError(\n \"In 'DESIRABILITY' mode for multiple targets, each target must \"\n \"have bounds defined.\"\n )\n\n @weights.validator\n def _validate_weights( # noqa: DOC101, DOC103\n self, _: Any, weights: List[float]\n ) -> None:\n \"\"\"Validate target weights.\n\n Raises:\n ValueError: If the number of weights and the number of targets differ.\n \"\"\"\n if weights is None:\n return\n\n # Assert that weights is a list of numbers\n validator = deep_iterable(instance_of(float), instance_of(list))\n validator(self, _, weights)\n\n if len(weights) != len(self.targets):\n raise ValueError(\n f\"Weights list for your objective has {len(weights)} values, but you \"\n f\"defined {len(self.targets)} targets.\"\n )\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Transform targets from experimental to computational representation.\n\n Args:\n data: The data to be transformed. Must contain all target values, can\n contain more columns.\n\n Returns:\n A new dataframe with the targets in computational representation. Columns\n will be as in the input (except when objective mode is ``DESIRABILITY``).\n\n Raises:\n ValueError: If the specified averaging function is unknown.\n \"\"\"\n # Perform transformations that are required independent of the mode\n transformed = data[[t.name for t in self.targets]].copy()\n for target in self.targets:\n transformed[target.name] = target.transform(data[target.name])\n\n # In desirability mode, the targets are additionally combined further into one\n if self.mode == \"DESIRABILITY\":\n if self.combine_func == \"GEOM_MEAN\":\n func = geom_mean\n elif self.combine_func == \"MEAN\":\n func = partial(np.average, axis=1)\n else:\n raise ValueError(\n f\"The specified averaging function {self.combine_func} is unknown.\"\n )\n\n vals = func(transformed.values, weights=self.weights)\n transformed = pd.DataFrame({\"Comp_Target\": vals}, index=transformed.index)\n\n return transformed" }, { "identifier": "NumericalDiscreteParameter", "path": "baybe/parameters/numerical.py", "snippet": "class NumericalDiscreteParameter(DiscreteParameter):\n \"\"\"Parameter class for discrete numerical parameters (a.k.a. 
setpoints).\"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = True\n # See base class.\n\n # object variables\n # NOTE: The parameter values are assumed to be sorted by the tolerance validator.\n _values: Tuple[float, ...] = field(\n # FIXME[typing]: https://github.com/python-attrs/cattrs/issues/111\n converter=lambda x: sorted(cattrs.structure(x, Tuple[float, ...])), # type: ignore\n # FIXME[typing]: https://github.com/python-attrs/attrs/issues/1197\n validator=[\n min_len(2),\n validate_unique_values, # type: ignore\n validate_is_finite,\n ],\n )\n \"\"\"The values the parameter can take.\"\"\"\n\n tolerance: float = field(default=0.0)\n \"\"\"The absolute tolerance used for deciding whether a value is in range. A tolerance\n larger than half the minimum distance between parameter values is not allowed\n because that could cause ambiguity when inputting data points later.\"\"\"\n\n @tolerance.validator\n def _validate_tolerance( # noqa: DOC101, DOC103\n self, _: Any, tolerance: float\n ) -> None:\n \"\"\"Validate that the given tolerance is safe.\n\n The tolerance is the allowed experimental uncertainty when\n reading in measured values. A tolerance larger than half the minimum\n distance between parameter values is not allowed because that could cause\n ambiguity when inputting data points later.\n\n Raises:\n ValueError: If the tolerance is not safe.\n \"\"\"\n # For zero tolerance, the only left requirement is that all parameter values\n # are distinct, which is already ensured by the corresponding validator.\n if tolerance == 0.0:\n return\n\n min_dist = np.diff(self.values).min()\n if min_dist == (eps := np.nextafter(0, 1, dtype=DTypeFloatNumpy)):\n raise NumericalUnderflowError(\n f\"The distance between any two parameter values must be at least \"\n f\"twice the size of the used floating point resolution of {eps}.\"\n )\n\n if tolerance >= (max_tol := min_dist / 2.0):\n raise ValueError(\n f\"Parameter '{self.name}' is initialized with tolerance {tolerance} \"\n f\"but due to the given parameter values {self.values}, the specified \"\n f\"tolerance must be smaller than {max_tol} to avoid ambiguity.\"\n )\n\n @property\n def values(self) -> tuple: # noqa: D102\n # See base class.\n return self._values\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n comp_df = pd.DataFrame({self.name: self.values}, index=self.values)\n return comp_df\n\n def is_in_range(self, item: float) -> bool: # noqa: D102\n # See base class.\n differences_acceptable = [\n np.abs(val - item) <= self.tolerance for val in self.values\n ]\n return any(differences_acceptable)" }, { "identifier": "SubstanceParameter", "path": "baybe/parameters/substance.py", "snippet": "class SubstanceParameter(DiscreteParameter):\n \"\"\"Generic substances that are treated with cheminformatics descriptors.\n\n Only a decorrelated subset of descriptors should be used as otherwise this can\n result in a large number of features. For a handful of molecules, keeping only\n descriptors that have a maximum correlation of 0.7 reduces the number of\n descriptors to about 5-20. 
The number might be substantially higher with more\n labels given.\n \"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = False\n # See base class.\n\n # object variables\n data: Dict[str, Smiles] = field(\n validator=deep_mapping(\n mapping_validator=min_len(2),\n # FIXME[typing]: https://github.com/python-attrs/attrs/issues/1206\n key_validator=and_(instance_of(str), min_len(1)),\n value_validator=lambda *x: None,\n )\n )\n \"\"\"A mapping that provides the SMILES strings for all available parameter values.\"\"\"\n\n decorrelate: Union[bool, float] = field(\n default=True, validator=validate_decorrelation\n )\n \"\"\"Specifies the used decorrelation mode for the parameter encoding.\n\n - ``False``: The encoding is used as is.\n - ``True``: The encoding is decorrelated using a default correlation threshold.\n - float in (0, 1): The encoding is decorrelated using the specified threshold.\n \"\"\"\n\n encoding: SubstanceEncoding = field(\n default=SubstanceEncoding.MORDRED, converter=SubstanceEncoding\n )\n # See base class.\n\n @encoding.validator\n def _validate_encoding(self, _: Any, value: str) -> None: # noqa: DOC101, DOC103\n \"\"\"Validate that the chosen encoding can be used.\n\n This validation is necessary since certain encodings are only usable when\n additional dependencies, in particular the ``chem`` dependency, have been\n installed.\n\n Raises:\n ImportError: If the ``chem``dependency was not installed but an encoding\n requiring this dependency is requested.\n \"\"\"\n if value is SubstanceEncoding.MORDRED and not (\n _MORDRED_INSTALLED and _RDKIT_INSTALLED\n ):\n raise ImportError(\n \"The mordred/rdkit packages are not installed, a SubstanceParameter \"\n \"with MORDRED encoding cannot be used. Consider installing baybe with \"\n \"'chem' dependency like 'pip install baybe[chem]'\"\n )\n if (\n value in [SubstanceEncoding.RDKIT, SubstanceEncoding.MORGAN_FP]\n and not _RDKIT_INSTALLED\n ):\n raise ImportError(\n \"The rdkit package is not installed, a SubstanceParameter with \"\n \"RDKIT or MORGAN_FP encoding cannot be used. 
Consider installing baybe \"\n \"with 'chem' dependency like 'pip install baybe[chem]'\"\n )\n\n @data.validator\n def _validate_substance_data( # noqa: DOC101, DOC103\n self, _: Any, data: Dict[str, Smiles]\n ) -> None:\n \"\"\"Validate that the substance data, provided as SMILES, is valid.\n\n Raises:\n ValueError: If one or more of the SMILES are invalid.\n ValueError: If the several entries represent the same substance.\n \"\"\"\n # Check for invalid SMILES\n canonical_smiles = {}\n exceptions = []\n for name, smiles in data.items():\n try:\n canonical_smiles[name] = get_canonical_smiles(smiles)\n except ValueError:\n exceptions.append(\n ValueError(\n f\"The SMILES '{smiles}' for molecule '{name}' does \"\n f\"not appear to be valid.\"\n )\n )\n if exceptions:\n raise ExceptionGroup(\"invalid SMILES\", exceptions)\n\n # Check for duplicate substances\n if groups := group_duplicate_values(canonical_smiles):\n exceptions = []\n for group, substances in groups.items():\n group_data = {s: data[s] for s in substances}\n exceptions.append(\n ValueError(\n f\"The following entries all represent the same substance \"\n f\"'{group}': {group_data}.\"\n )\n )\n raise ExceptionGroup(\"duplicate substances\", exceptions)\n\n @property\n def values(self) -> tuple:\n \"\"\"Returns the labels of the given set of molecules.\"\"\"\n # Since the order of dictionary keys is important here, this will only work\n # for Python 3.7 or higher\n return tuple(self.data.keys())\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n vals = list(self.data.values())\n pref = self.name + \"_\"\n\n # Get the raw descriptors\n if self.encoding is SubstanceEncoding.MORDRED:\n comp_df = smiles_to_mordred_features(vals, prefix=pref)\n elif self.encoding is SubstanceEncoding.RDKIT:\n comp_df = smiles_to_rdkit_features(vals, prefix=pref)\n elif self.encoding is SubstanceEncoding.MORGAN_FP:\n comp_df = smiles_to_fp_features(vals, prefix=pref)\n else:\n raise ValueError(\n f\"Unknown parameter encoding {self.encoding} for parameter {self.name}.\"\n )\n\n # Drop NaN and constant columns\n comp_df = comp_df.loc[:, ~comp_df.isna().any(axis=0)]\n comp_df = df_drop_single_value_columns(comp_df)\n\n # If there are bool columns, convert them to int (possible for Mordred)\n comp_df.loc[:, comp_df.dtypes == bool] = comp_df.loc[\n :, comp_df.dtypes == bool\n ].astype(int)\n\n # Label the rows with the molecule names\n comp_df.index = pd.Index(self.values)\n\n # Get a decorrelated subset of the descriptors\n if self.decorrelate:\n if isinstance(self.decorrelate, bool):\n comp_df = df_uncorrelated_features(comp_df)\n else:\n comp_df = df_uncorrelated_features(comp_df, threshold=self.decorrelate)\n\n return comp_df" }, { "identifier": "SequentialGreedyRecommender", "path": "baybe/recommenders/bayesian.py", "snippet": "class SequentialGreedyRecommender(BayesianRecommender):\n \"\"\"Recommender using sequential Greedy optimization.\n\n This recommender implements the BoTorch functions ``optimize_acqf_discrete``,\n ``optimize_acqf`` and ``optimize_acqf_mixed`` for the optimization of discrete,\n continuous and hybrid search spaces. In particular, it can be applied in all\n kinds of search spaces.\n It is important to note that this algorithm performs a brute-force optimization in\n hybrid search spaces which can be computationally expensive. 
Thus, the behavior of\n the algorithm in hybrid search spaces can be controlled by two additional\n parameters.\n \"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.HYBRID\n # See base class.\n\n # Object variables\n hybrid_sampler: str = field(\n validator=validators.in_([\"None\", \"Farthest\", \"Random\"]), default=\"None\"\n )\n \"\"\"Strategy used for sampling the discrete subspace when performing hybrid search\n space optimization.\"\"\"\n\n sampling_percentage: float = field(default=1.0)\n \"\"\"Percentage of discrete search space that is sampled when performing hybrid search\n space optimization. Ignored when ``hybrid_sampler=\"None\"``.\"\"\"\n\n @sampling_percentage.validator\n def _validate_percentage( # noqa: DOC101, DOC103\n self, _: Any, value: float\n ) -> None:\n \"\"\"Validate that the given value is in fact a percentage.\n\n Raises:\n ValueError: If ``value`` is not between 0 and 1.\n \"\"\"\n if not 0 <= value <= 1:\n raise ValueError(\n f\"Hybrid sampling percentage needs to be between 0 and 1 but is {value}\"\n )\n\n def _recommend_discrete(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n candidates_comp: pd.DataFrame,\n batch_quantity: int,\n ) -> pd.Index:\n # See base class.\n\n # determine the next set of points to be tested\n candidates_tensor = to_tensor(candidates_comp)\n try:\n points, _ = optimize_acqf_discrete(\n acquisition_function, batch_quantity, candidates_tensor\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # retrieve the index of the points from the input dataframe\n # IMPROVE: The merging procedure is conceptually similar to what\n # `SearchSpace._match_measurement_with_searchspace_indices` does, though using\n # a simpler matching logic. 
When refactoring the SearchSpace class to\n # handle continuous parameters, a corresponding utility could be extracted.\n idxs = pd.Index(\n pd.merge(\n candidates_comp.reset_index(),\n pd.DataFrame(points, columns=candidates_comp.columns),\n on=list(candidates_comp),\n )[\"index\"]\n )\n assert len(points) == len(idxs)\n\n return idxs\n\n def _recommend_continuous(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n # See base class.\n\n try:\n points, _ = optimize_acqf(\n acq_function=acquisition_function,\n bounds=searchspace.continuous.param_bounds_comp,\n q=batch_quantity,\n num_restarts=5, # TODO make choice for num_restarts\n raw_samples=10, # TODO make choice for raw_samples\n equality_constraints=[\n c.to_botorch(searchspace.continuous.parameters)\n for c in searchspace.continuous.constraints_lin_eq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n inequality_constraints=[\n c.to_botorch(searchspace.continuous.parameters)\n for c in searchspace.continuous.constraints_lin_ineq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # Return optimized points as dataframe\n rec = pd.DataFrame(points, columns=searchspace.continuous.param_names)\n return rec\n\n def _recommend_hybrid(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n \"\"\"Recommend points using the ``optimize_acqf_mixed`` function of BoTorch.\n\n This functions samples points from the discrete subspace, performs optimization\n in the continuous subspace with these points being fixed and returns the best\n found solution.\n **Important**: This performs a brute-force calculation by fixing every possible\n assignment of discrete variables and optimizing the continuous subspace for\n each of them. It is thus computationally expensive.\n\n Args:\n acquisition_function: The acquisition function to be optimized.\n searchspace: The search space in which the recommendations should be made.\n batch_quantity: The size of the calculated batch.\n\n Returns:\n The recommended points.\n\n Raises:\n NoMCAcquisitionFunctionError: If a non Monte Carlo acquisition function\n is chosen.\n \"\"\"\n # Get discrete candidates.\n _, candidates_comp = searchspace.discrete.get_candidates(\n allow_repeated_recommendations=True,\n allow_recommending_already_measured=True,\n )\n\n # Calculate the number of samples from the given percentage\n n_candidates = int(self.sampling_percentage * len(candidates_comp.index))\n\n # Potential sampling of discrete candidates\n if self.hybrid_sampler == \"Farthest\":\n ilocs = farthest_point_sampling(candidates_comp.values, n_candidates)\n candidates_comp = candidates_comp.iloc[ilocs]\n elif self.hybrid_sampler == \"Random\":\n candidates_comp = candidates_comp.sample(n_candidates)\n\n # Prepare all considered discrete configurations in the List[Dict[int, float]]\n # format expected by BoTorch\n # TODO: Currently assumes that discrete parameters are first and continuous\n # second. 
Once parameter redesign [11611] is completed, we might adjust this.\n candidates_comp.columns = list(range(len(candidates_comp.columns)))\n fixed_features_list = candidates_comp.to_dict(\"records\")\n\n # Actual call of the BoTorch optimization routine\n try:\n points, _ = optimize_acqf_mixed(\n acq_function=acquisition_function,\n bounds=searchspace.param_bounds_comp,\n q=batch_quantity,\n num_restarts=5, # TODO make choice for num_restarts\n raw_samples=10, # TODO make choice for raw_samples\n fixed_features_list=fixed_features_list,\n equality_constraints=[\n c.to_botorch(\n searchspace.continuous.parameters,\n idx_offset=len(candidates_comp.columns),\n )\n for c in searchspace.continuous.constraints_lin_eq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n inequality_constraints=[\n c.to_botorch(\n searchspace.continuous.parameters,\n idx_offset=len(candidates_comp.columns),\n )\n for c in searchspace.continuous.constraints_lin_ineq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # TODO [14819]: The following code is necessary due to floating point\n # inaccuracies introduced by BoTorch (potentially due to some float32\n # conversion?). The current workaround is the match the recommendations back\n # to the closest candidate points.\n\n # Split discrete and continuous parts\n disc_points = points[:, : len(candidates_comp.columns)]\n cont_points = points[:, len(candidates_comp.columns) :]\n\n # Find the closest match with the discrete candidates\n candidates_comp_np = candidates_comp.to_numpy()\n disc_points_np = disc_points.numpy()\n if not disc_points_np.flags[\"C_CONTIGUOUS\"]:\n disc_points_np = np.ascontiguousarray(disc_points_np)\n if not candidates_comp_np.flags[\"C_CONTIGUOUS\"]:\n candidates_comp_np = np.ascontiguousarray(candidates_comp_np)\n disc_idxs_iloc = pairwise_distances_argmin(\n disc_points_np, candidates_comp_np, metric=\"manhattan\"\n )\n\n # Get the actual search space dataframe indices\n disc_idxs_loc = candidates_comp.iloc[disc_idxs_iloc].index\n\n # Get experimental representation of discrete and continuous parts\n rec_disc_exp = searchspace.discrete.exp_rep.loc[disc_idxs_loc]\n rec_cont_exp = pd.DataFrame(\n cont_points, columns=searchspace.continuous.param_names\n )\n\n # Adjust the index of the continuous part and concatenate both\n rec_cont_exp.index = rec_disc_exp.index\n rec_exp = pd.concat([rec_disc_exp, rec_cont_exp], axis=1)\n # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n return rec_exp" }, { "identifier": "RandomRecommender", "path": "baybe/recommenders/sampling.py", "snippet": "class RandomRecommender(NonPredictiveRecommender):\n \"\"\"Recommends experiments randomly.\"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.HYBRID\n # See base class.\n\n def _recommend_hybrid(\n self,\n searchspace: SearchSpace,\n batch_quantity: int,\n candidates_comp: Optional[pd.DataFrame] = None,\n ) -> pd.DataFrame:\n # See base class.\n\n if searchspace.type == SearchSpaceType.DISCRETE:\n if candidates_comp is None:\n raise TypeError(\n \"\"\"You did not provide a dataframe of candidates when applying the\n random recommender to a purely discrete space. 
Please ensure that\n this dataframe is not None.\"\"\"\n )\n return candidates_comp.sample(batch_quantity)\n cont_random = searchspace.continuous.samples_random(n_points=batch_quantity)\n if searchspace.type == SearchSpaceType.CONTINUOUS:\n return cont_random\n disc_candidates, _ = searchspace.discrete.get_candidates(True, True)\n\n # TODO decide mechanism if number of possible discrete candidates is smaller\n # than batch size\n disc_random = disc_candidates.sample(\n n=batch_quantity,\n replace=len(disc_candidates) < batch_quantity,\n )\n\n cont_random.reset_index(drop=True)\n cont_random.index = disc_random.index\n return pd.concat([disc_random, cont_random], axis=1)" }, { "identifier": "SearchSpace", "path": "baybe/searchspace/core.py", "snippet": "class SearchSpace(SerialMixin):\n \"\"\"Class for managing the overall search space.\n\n The search space might be purely discrete, purely continuous, or hybrid.\n Note that created objects related to the computational representations of parameters\n (e.g., parameter bounds, computational dataframes, etc.) may use a different\n parameter order than what is specified through the constructor: While the\n passed parameter list can contain parameters in arbitrary order, the\n aforementioned objects (by convention) list discrete parameters first, followed\n by continuous ones.\n \"\"\"\n\n discrete: SubspaceDiscrete = field(factory=SubspaceDiscrete.empty)\n \"\"\"The (potentially empty) discrete subspace of the overall search space.\"\"\"\n\n continuous: SubspaceContinuous = field(factory=SubspaceContinuous.empty)\n \"\"\"The (potentially empty) continuous subspace of the overall search space.\"\"\"\n\n def __attrs_post_init__(self):\n \"\"\"Perform validation and record telemetry values.\"\"\"\n validate_parameters(self.parameters)\n validate_constraints(self.constraints, self.parameters)\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_SEARCHSPACE_CREATION\"], 1)\n telemetry_record_value(TELEM_LABELS[\"NUM_PARAMETERS\"], len(self.parameters))\n telemetry_record_value(\n TELEM_LABELS[\"NUM_CONSTRAINTS\"],\n len(self.constraints) if self.constraints else 0,\n )\n\n @classmethod\n def from_product(\n cls,\n parameters: List[Parameter],\n constraints: Optional[List[Constraint]] = None,\n empty_encoding: bool = False,\n ) -> SearchSpace:\n \"\"\"Create a search space from a cartesian product.\n\n In the search space, optional subsequent constraints are applied.\n That is, the discrete subspace becomes the (filtered) cartesian product\n containing all discrete parameter combinations while, analogously, the\n continuous subspace represents the (filtered) cartesian product of all\n continuous parameters.\n\n Args:\n parameters: The parameters spanning the search space.\n constraints: An optional set of constraints restricting the valid parameter\n space.\n empty_encoding: If ``True``, uses an \"empty\" encoding for all parameters.\n This is useful, for instance, in combination with random search\n strategies that do not read the actual parameter values, since it avoids\n the (potentially costly) transformation of the parameter values to their\n computational representation.\n\n Returns:\n The constructed search space.\n \"\"\"\n # IMPROVE: The arguments get pre-validated here to avoid the potentially costly\n # creation of the subspaces. 
Perhaps there is an elegant way to bypass the\n # default validation in the initializer (which is required for other\n # ways of object creation) in this particular case.\n validate_parameters(parameters)\n if constraints:\n validate_constraints(constraints, parameters)\n else:\n constraints = []\n\n discrete: SubspaceDiscrete = SubspaceDiscrete.from_product(\n parameters=[\n cast(DiscreteParameter, p) for p in parameters if p.is_discrete\n ],\n constraints=[\n cast(DiscreteConstraint, c) for c in constraints if c.is_discrete\n ],\n empty_encoding=empty_encoding,\n )\n continuous: SubspaceContinuous = SubspaceContinuous(\n parameters=[\n cast(NumericalContinuousParameter, p)\n for p in parameters\n if not p.is_discrete\n ],\n constraints_lin_eq=[\n cast(ContinuousLinearEqualityConstraint, c)\n for c in constraints\n if isinstance(c, ContinuousLinearEqualityConstraint)\n ],\n constraints_lin_ineq=[\n cast(ContinuousLinearInequalityConstraint, c)\n for c in constraints\n if isinstance(c, ContinuousLinearInequalityConstraint)\n ],\n )\n\n return SearchSpace(discrete=discrete, continuous=continuous)\n\n @property\n def parameters(self) -> List[Parameter]:\n \"\"\"Return the list of parameters of the search space.\"\"\"\n return self.discrete.parameters + self.continuous.parameters\n\n @property\n def constraints(self) -> List[Constraint]:\n \"\"\"Return the constraints of the search space.\"\"\"\n return (\n self.discrete.constraints\n + self.continuous.constraints_lin_eq\n + self.continuous.constraints_lin_ineq\n )\n\n @property\n def type(self) -> SearchSpaceType:\n \"\"\"Return the type of the search space.\"\"\"\n if self.discrete.is_empty and not self.continuous.is_empty:\n return SearchSpaceType.CONTINUOUS\n if not self.discrete.is_empty and self.continuous.is_empty:\n return SearchSpaceType.DISCRETE\n if not self.discrete.is_empty and not self.continuous.is_empty:\n return SearchSpaceType.HYBRID\n raise RuntimeError(\"This line should be impossible to reach.\")\n\n @property\n def contains_mordred(self) -> bool:\n \"\"\"Indicates if any of the discrete parameters uses ``MORDRED`` encoding.\"\"\"\n return any(\n p.encoding is SubstanceEncoding.MORDRED for p in self.discrete.parameters\n )\n\n @property\n def contains_rdkit(self) -> bool:\n \"\"\"Indicates if any of the discrete parameters uses ``RDKIT`` encoding.\"\"\"\n return any(\n p.encoding is SubstanceEncoding.RDKIT for p in self.discrete.parameters\n )\n\n @property\n def param_bounds_comp(self) -> torch.Tensor:\n \"\"\"Return bounds as tensor.\"\"\"\n return torch.hstack(\n [self.discrete.param_bounds_comp, self.continuous.param_bounds_comp]\n )\n\n @property\n def task_idx(self) -> Optional[int]:\n \"\"\"The column index of the task parameter in computational representation.\"\"\"\n try:\n # TODO [16932]: Redesign metadata handling\n task_param = next(\n p for p in self.parameters if isinstance(p, TaskParameter)\n )\n except StopIteration:\n return None\n # TODO[11611]: The current approach has two limitations:\n # 1. It matches by column name and thus assumes that the parameter name\n # is used as the column name.\n # 2. It relies on the current implementation detail that discrete parameters\n # appear first in the computational dataframe.\n # --> Fix this when refactoring the data\n return self.discrete.comp_rep.columns.get_loc(task_param.name)\n\n @property\n def n_tasks(self) -> int:\n \"\"\"The number of tasks encoded in the search space.\"\"\"\n # TODO [16932]: This approach only works for a single task parameter. 
For\n # multiple task parameters, we need to align what the output should even\n # represent (e.g. number of combinatorial task combinations, number of\n # tasks per task parameter, etc).\n try:\n task_param = next(\n p for p in self.parameters if isinstance(p, TaskParameter)\n )\n return len(task_param.values)\n\n # When there are no task parameters, we effectively have a single task\n except StopIteration:\n return 1\n\n def transform(\n self,\n data: pd.DataFrame,\n ) -> pd.DataFrame:\n \"\"\"Transform data from experimental to computational representation.\n\n This function can e.g. be used to transform data obtained from measurements.\n Continuous parameters are not transformed but included.\n\n Args:\n data: The data to be transformed. Must contain all specified parameters, can\n contain more columns.\n\n Returns:\n A dataframe with the parameters in computational representation.\n \"\"\"\n # Transform subspaces separately\n df_discrete = self.discrete.transform(data)\n df_continuous = self.continuous.transform(data)\n\n # Combine Subspaces\n comp_rep = pd.concat([df_discrete, df_continuous], axis=1)\n\n return comp_rep" }, { "identifier": "TwoPhaseStrategy", "path": "baybe/strategies/composite.py", "snippet": "class TwoPhaseStrategy(Strategy):\n \"\"\"A two-phased strategy that switches the recommender at a certain specified point.\n\n The recommender is switched when a new (batch) recommendation is requested and\n the training data set size (i.e., the total number of collected measurements\n including those gathered before the strategy was active) is equal to or greater\n than the number specified via the ``switch_after`` parameter.\n\n Note:\n Throughout each phase, the strategy reuses the **same** recommender object,\n that is, no new instances are created. Therefore, special attention is required\n when using the strategy with stateful recommenders.\n \"\"\"\n\n initial_recommender: Recommender = field(factory=RandomRecommender)\n \"\"\"The initial recommender used by the strategy.\"\"\"\n\n recommender: Recommender = field(factory=SequentialGreedyRecommender)\n \"\"\"The recommender used by the strategy after the switch.\"\"\"\n\n switch_after: int = field(default=1)\n \"\"\"The number of experiments after which the recommender is switched for the next\n requested batch.\"\"\"\n\n def select_recommender( # noqa: D102\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n ) -> Recommender:\n # See base class.\n\n # FIXME: enable predictive recommenders for empty training data\n if (train_x is None or len(train_x) == 0) and not isinstance(\n self.initial_recommender, NonPredictiveRecommender\n ):\n raise _unsupported_recommender_error\n\n return (\n self.recommender\n if len(train_x) >= self.switch_after\n else self.initial_recommender\n )" }, { "identifier": "NumericalTarget", "path": "baybe/targets/numerical.py", "snippet": "class NumericalTarget(Target, SerialMixin):\n \"\"\"Class for numerical targets.\"\"\"\n\n # NOTE: The type annotations of `bounds` are correctly overridden by the attrs\n # converter. Nonetheless, PyCharm's linter might incorrectly raise a type warning\n # when calling the constructor. This is a known issue:\n # https://youtrack.jetbrains.com/issue/PY-34243\n # Quote from attrs docs:\n # If a converter’s first argument has a type annotation, that type will\n # appear in the signature for __init__. 
A converter will override an explicit\n # type annotation or type argument.\n\n mode: TargetMode = field(converter=TargetMode)\n \"\"\"The target mode.\"\"\"\n\n bounds: Interval = field(default=None, converter=convert_bounds)\n \"\"\"Optional target bounds.\"\"\"\n\n transformation: Optional[TargetTransformation] = field(\n converter=lambda x: None if x is None else TargetTransformation(x)\n )\n \"\"\"An optional target transformation.\"\"\"\n\n @transformation.default\n def _default_transformation(self) -> Optional[TargetTransformation]:\n \"\"\"Provide the default transformation for bounded targets.\"\"\"\n if self.bounds.is_bounded:\n fun = _VALID_TRANSFORMATIONS[self.mode][0]\n warnings.warn(\n f\"The transformation for target '{self.name}' \"\n f\"in '{self.mode.name}' mode has not been specified. \"\n f\"Setting the transformation to '{fun.name}'.\",\n UserWarning,\n )\n return fun\n return None\n\n @bounds.validator\n def _validate_bounds(self, _: Any, bounds: Interval) -> None: # noqa: DOC101, DOC103\n \"\"\"Validate the bounds.\n\n Raises:\n ValueError: If the target is defined on a half-bounded interval.\n ValueError: If the target is in ``MATCH`` mode but the provided bounds\n are infinite.\n \"\"\"\n # IMPROVE: We could also include half-way bounds, which however don't work\n # for the desirability approach\n if bounds.is_half_bounded:\n raise ValueError(\"Targets on half-bounded intervals are not supported.\")\n if self.mode is TargetMode.MATCH and not bounds.is_bounded:\n raise ValueError(\n f\"Target '{self.name}' is in {TargetMode.MATCH.name} mode,\"\n f\"which requires finite bounds.\"\n )\n\n @transformation.validator\n def _validate_transformation( # noqa: DOC101, DOC103\n self, _: Any, value: Optional[TargetTransformation]\n ) -> None:\n \"\"\"Validate that the given transformation is compatible with the specified mode.\n\n Raises:\n ValueError: If the target transformation and mode are not compatible.\n \"\"\"\n if (value is not None) and (value not in _VALID_TRANSFORMATIONS[self.mode]):\n raise ValueError(\n f\"You specified bounds for target '{self.name}', but your \"\n f\"specified transformation '{value}' is not compatible \"\n f\"with the target mode {self.mode}'. 
It must be one \"\n f\"of {_VALID_TRANSFORMATIONS[self.mode]}.\"\n )\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame: # noqa: D102\n # See base class.\n\n # When bounds are given, apply the respective transformation\n if self.bounds.is_bounded:\n func = _get_target_transformation(\n # TODO[typing]: For bounded targets (see if clause), the attrs default\n # ensures there is always a transformation specified.\n # Use function overloads to make this explicit.\n self.mode,\n cast(TargetTransformation, self.transformation),\n )\n transformed = pd.DataFrame(\n func(data, *self.bounds.to_tuple()), index=data.index\n )\n\n # If no bounds are given, simply negate all target values for ``MIN`` mode.\n # For ``MAX`` mode, nothing needs to be done.\n # For ``MATCH`` mode, the validators avoid a situation without specified bounds.\n elif self.mode is TargetMode.MIN:\n transformed = -data\n\n else:\n transformed = data.copy()\n\n return transformed" }, { "identifier": "VARNAME_TELEMETRY_ENABLED", "path": "baybe/telemetry.py", "snippet": "VARNAME_TELEMETRY_ENABLED = \"BAYBE_TELEMETRY_ENABLED\"" }, { "identifier": "VARNAME_TELEMETRY_USERNAME", "path": "baybe/telemetry.py", "snippet": "VARNAME_TELEMETRY_USERNAME = \"BAYBE_TELEMETRY_USERNAME\"" }, { "identifier": "get_user_details", "path": "baybe/telemetry.py", "snippet": "def get_user_details() -> Dict[str, str]:\n \"\"\"Generate user details.\n\n These are submitted as metadata with requested telemetry stats.\n\n Returns:\n The hostname and username in hashed format as well as the package version.\n \"\"\"\n from baybe import __version__\n\n username_hash = os.environ.get(\n VARNAME_TELEMETRY_USERNAME, DEFAULT_TELEMETRY_USERNAME\n )\n hostname_hash = os.environ.get(\n VARNAME_TELEMETRY_HOSTNAME, DEFAULT_TELEMETRY_HOSTNAME\n )\n\n return {\"host\": hostname_hash, \"user\": username_hash, \"version\": __version__}" }, { "identifier": "add_fake_results", "path": "baybe/utils/dataframe.py", "snippet": "def add_fake_results(\n data: pd.DataFrame,\n campaign: Campaign,\n good_reference_values: Optional[Dict[str, list]] = None,\n good_intervals: Optional[Dict[str, Tuple[float, float]]] = None,\n bad_intervals: Optional[Dict[str, Tuple[float, float]]] = None,\n) -> None:\n \"\"\"Add fake results to a dataframe which was the result of a BayBE recommendation.\n\n It is possible to specify \"good\" values, which will be given a better\n target value. With this, the algorithm can be driven towards certain optimal values\n whilst still being random. Useful for testing. Note that this does not return a\n new dataframe and that the dataframe is changed in-place.\n\n Args:\n data: Output of the ``recommend`` function of a ``Campaign``, see\n :func:`baybe.campaign.Campaign.recommend`.\n campaign: The corresponding campaign, providing configuration, targets, etc.\n good_reference_values: A dictionary containing parameter names (= dict keys) and\n respective parameter values (= dict values) that specify what will be\n considered good parameter settings. Conditions for different parameters are\n connected via \"and\" logic, i.e. the targets will only get good values when\n all parameters have good reference values.\n good_intervals: A dictionary containing target names (= dict keys) and\n respective \"good\" target value ranges (= dict values) in the form of\n 2-tuples. 
Each target will be assigned a random value in its respective\n target range whenever the corresponding parameters meet the conditions\n specified through ``good_reference_values``.\n bad_intervals: Analogous to ``good_intervals`` but covering the cases where\n the parameters lie outside the conditions specified through\n ``good_reference_values``.\n\n Raises:\n ValueError: If good values for a parameter were specified, but this parameter\n is not part of the dataframe.\n ValueError: If the target mode is unrecognized when trying to add fake values.\n TypeError: If the entries in ``good_reference_values`` are not lists.\n \"\"\"\n # Per default, there are no reference values for good parameters\n if good_reference_values is None:\n good_reference_values = {}\n\n # Validate input\n for param, vals in good_reference_values.items():\n if param not in data.columns:\n raise ValueError(\n f\"When adding fake results you specified good \"\n f\"values for the parameter '{param}' but this \"\n f\"parameter is not in the dataframe.\"\n )\n if not isinstance(vals, list):\n raise TypeError(\n f\"Entries in parameter 'good_reference_values' \"\n f\"(which is a dictionary) must be lists, but you \"\n f\"provided {vals}.\"\n )\n\n # Set defaults for good intervals\n if good_intervals is None:\n good_intervals = {}\n for target in campaign.targets:\n if target.mode is TargetMode.MAX:\n lbound = target.bounds.lower if np.isfinite(target.bounds.lower) else 66\n ubound = (\n target.bounds.upper if np.isfinite(target.bounds.upper) else 100\n )\n interv = (lbound, ubound)\n elif target.mode is TargetMode.MIN:\n lbound = target.bounds.lower if np.isfinite(target.bounds.lower) else 0\n ubound = target.bounds.upper if np.isfinite(target.bounds.upper) else 33\n interv = (lbound, ubound)\n elif target.mode is TargetMode.MATCH:\n lbound = target.bounds.lower if np.isfinite(target.bounds.lower) else 0\n ubound = (\n target.bounds.upper if np.isfinite(target.bounds.upper) else 100\n )\n interv = (\n lbound + 0.4 * (ubound - lbound),\n lbound + 0.6 * (ubound - lbound),\n )\n else:\n raise ValueError(\n \"Unrecognized target mode when trying to add fake values.\"\n )\n good_intervals[target.name] = interv\n\n # Set defaults for bad intervals\n if bad_intervals is None:\n bad_intervals = {}\n for target in campaign.targets:\n if target.mode is TargetMode.MAX:\n lbound = target.bounds.lower if np.isfinite(target.bounds.lower) else 0\n ubound = target.bounds.upper if np.isfinite(target.bounds.upper) else 33\n interv = (lbound, ubound)\n elif target.mode is TargetMode.MIN:\n lbound = target.bounds.lower if np.isfinite(target.bounds.lower) else 66\n ubound = (\n target.bounds.upper if np.isfinite(target.bounds.upper) else 100\n )\n interv = (lbound, ubound)\n elif target.mode is TargetMode.MATCH:\n lbound = target.bounds.lower if np.isfinite(target.bounds.lower) else 0\n ubound = (\n target.bounds.upper if np.isfinite(target.bounds.upper) else 100\n )\n interv = (\n # Take as bad values the interval above the good interval\n lbound + 0.6 * (ubound - lbound),\n lbound + 1.2 * (ubound - lbound),\n )\n else:\n raise ValueError(\n \"Unrecognized target mode when trying to add fake values.\"\n )\n bad_intervals[target.name] = interv\n\n # Add the fake data for each target\n for target in campaign.targets:\n # Add bad values\n data[target.name] = np.random.uniform(\n bad_intervals[target.name][0], bad_intervals[target.name][1], len(data)\n )\n\n # Create masks that identify locations where to place good values\n masks = []\n for 
param, vals in good_reference_values.items():\n mask = data[param].isin(vals)\n masks.append(mask)\n\n # Overwrite bad values with good ones using the computed masks\n if len(masks) > 0:\n final_mask = pd.concat(masks, axis=1).all(axis=1)\n data.loc[final_mask, target.name] = np.random.uniform(\n good_intervals[target.name][0],\n good_intervals[target.name][1],\n final_mask.sum(),\n )" } ]
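The TwoPhaseStrategy snippet in the context above documents a recommender switch once the number of collected measurements reaches switch_after. As a quick illustration of that rule, here is a minimal sketch that uses only class names appearing in this record's context and import_statement; the concrete numbers are made up:

from baybe.recommenders import RandomRecommender, SequentialGreedyRecommender
from baybe.strategies import TwoPhaseStrategy

# Phase 1: the non-predictive RandomRecommender serves batches while fewer than
# `switch_after` measurements exist; phase 2: SequentialGreedyRecommender takes over.
strategy = TwoPhaseStrategy(
    initial_recommender=RandomRecommender(),
    recommender=SequentialGreedyRecommender(),
    switch_after=5,  # switch as soon as 5 measurements have been collected
)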
import os from random import randint from baybe.campaign import Campaign from baybe.objective import Objective from baybe.parameters import NumericalDiscreteParameter, SubstanceParameter from baybe.recommenders import RandomRecommender, SequentialGreedyRecommender from baybe.searchspace import SearchSpace from baybe.strategies import TwoPhaseStrategy from baybe.targets import NumericalTarget from baybe.telemetry import ( VARNAME_TELEMETRY_ENABLED, VARNAME_TELEMETRY_USERNAME, get_user_details, ) from baybe.utils.dataframe import add_fake_results
13,929
"""Simulate different users and telemetry settings. This script does some calls so that the results can be viewed on AWS CloudWatch. """ dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } parameters = [ SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED"), SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED"), SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED"), NumericalDiscreteParameter(name="Temp_C", values=[90, 105, 120], tolerance=2), NumericalDiscreteParameter( name="Concentration", values=[0.057, 0.1, 0.153], tolerance=0.005 ), ] config = { "searchspace": SearchSpace.from_product( parameters=parameters, constraints=None, ), "objective": Objective(
"""Simulate different users and telemetry settings. This script does some calls so that the results can be viewed on AWS CloudWatch. """ dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } parameters = [ SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED"), SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED"), SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED"), NumericalDiscreteParameter(name="Temp_C", values=[90, 105, 120], tolerance=2), NumericalDiscreteParameter( name="Concentration", values=[0.057, 0.1, 0.153], tolerance=0.005 ), ] config = { "searchspace": SearchSpace.from_product( parameters=parameters, constraints=None, ), "objective": Objective(
mode="SINGLE", targets=[NumericalTarget(name="Yield", mode="MAX")]
8
2023-11-27 17:02:40+00:00
16k
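The cropped_code in the record above is, per its docstring, a script that simulates different users and telemetry settings. The following is a hedged sketch of how the telemetry helpers from this record's context could be driven per user; only the environment-variable names and get_user_details come from the record, while the loop and fake usernames are illustrative assumptions, not the dataset's actual continuation:

import os

from baybe.telemetry import (
    VARNAME_TELEMETRY_ENABLED,
    VARNAME_TELEMETRY_USERNAME,
    get_user_details,
)

for fake_user in ["TEST_USER_A", "TEST_USER_B"]:  # assumed usernames
    os.environ[VARNAME_TELEMETRY_ENABLED] = "true"
    os.environ[VARNAME_TELEMETRY_USERNAME] = fake_user
    # get_user_details() reads VARNAME_TELEMETRY_USERNAME from the environment,
    # so each iteration reports a different user next to the hashed hostname
    # and package version.
    print(get_user_details())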
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n\n self.config.use_mm_proj = True\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n # self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))" }, { "identifier": "LlavaMetaForCausalLM", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # 
st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x 
in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = 
mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n # print(\"Emb length:\", len(self.get_input_embeddings().weight.data))\n # if len(self.get_input_embeddings().weight.data) > 0:\n # if len(self.get_input_embeddings().weight.data) > 0:\n # self.get_input_embeddings().weight.data[-num_new_tokens:] = inp_embs\n # self.get_output_embeddings().weight.data[-num_new_tokens:] = out_embs\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True" }, { "identifier": "LlavaMetaForCausalLM_gd", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] 
for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n 
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = 
output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" }, { "identifier": "LlavaMetaForCausalLM_gd_interactive", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd_interactive(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images,obj_feats=None,num_it=0\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = 
torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = 
cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if batch_idx >= len(input_ids) - num_it:\n obj_idx = cur_input_ids == 1273\n idx_in_inter=batch_idx-(len(input_ids)-num_it)\n cur_new_input_embeds[-1][obj_idx] = obj_feats[idx_in_inter].to(cur_new_input_embeds[-1].dtype)\n if labels is not None:\n cur_labels[cur_labels==1273]=IGNORE_INDEX\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n def prepare_inputs_labels_for_multimodal_NoInter(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and 
images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = 
cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = 
self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def initialize_interactive_modules(self, cfg):\n from .semsam.BaseModel import BaseModel as SemSamBaseModel\n from .semsam import build_model as build_semsam_model\n\n seg_model = SemSamBaseModel(cfg, build_semsam_model(cfg))\n if not (cfg.MODEL.WEIGHTS == \"None\"):\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.interactive_model = seg_model\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" } ]
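The prepare_inputs_labels_for_multimodal variants in the context above share one core move: the IMAGE_TOKEN_INDEX placeholder in input_ids is cut out and the projected image features are spliced in between the embedded text halves. Below is a stripped-down sketch of just that splice (single image, no labels, no attention-mask padding); the placeholder value -200 is the one LLaVA-style constants typically define and is assumed here:

import torch

IMAGE_TOKEN_INDEX = -200  # assumed placeholder id for the image token

def splice_image_features(input_ids, image_features, embed_tokens):
    # input_ids: (seq_len,) LongTensor containing exactly one IMAGE_TOKEN_INDEX
    # image_features: (num_patches, hidden) output of the vision tower + mm_projector
    # embed_tokens: the language model's token embedding module
    pos = int(torch.where(input_ids == IMAGE_TOKEN_INDEX)[0][0])
    before = embed_tokens(input_ids[:pos])      # text embeddings before the image
    after = embed_tokens(input_ids[pos + 1:])   # text embeddings after the image
    return torch.cat([before, image_features, after], dim=0)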
from typing import List, Optional, Tuple, Union from torch.nn import CrossEntropyLoss from transformers import AutoConfig, AutoModelForCausalLM, \ LlamaConfig, LlamaModel, LlamaForCausalLM from transformers.modeling_outputs import CausalLMOutputWithPast from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive import torch import torch.nn as nn import transformers
13,049
        )
        if 'image_clip' in instances[0]:
            images = [instance['image_clip'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch


class LlavaConfig(LlamaConfig):
    model_type = "llava"


class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super(LlavaLlamaModel, self).__init__(config)


class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
    config_class = LlavaConfig

    def __init__(self, config):
        super(LlamaForCausalLM, self).__init__(config)
        self.model = LlavaLlamaModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

IGNORE_INDEX = -100


# @dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    # tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances, tokenizer):
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :tokenizer.model_max_length]
        labels = labels[:, :tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(tokenizer.pad_token_id),
        )
        if 'image_clip' in instances[0]:
            images = [instance['image_clip'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch


class LlavaConfig(LlamaConfig):
    model_type = "llava"


class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super(LlavaLlamaModel, self).__init__(config)


class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
    config_class = LlavaConfig

    def __init__(self, config):
        super(LlamaForCausalLM, self).__init__(config)
        self.model = LlavaLlamaModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs
class LlavaLlamaForCausalLM_gd(LlamaForCausalLM, LlavaMetaForCausalLM_gd):
2
2023-12-04 10:59:21+00:00
16k
daveredrum/SceneTex
models/pipeline/texture_pipeline.py
[ { "identifier": "TextureMesh", "path": "models/modules/meshes.py", "snippet": "class TextureMesh(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.num_instances = 0\n\n self._init_mesh()\n\n def apply_texture_to_mesh(self, mesh, faces, aux, texture_tensor, sampling_mode=\"bilinear\"):\n new_mesh = mesh.clone() # in-place operation - DANGER!!!\n new_mesh.textures = TexturesUV(\n maps=texture_tensor, # B, H, W, C\n faces_uvs=faces.textures_idx[None, ...],\n verts_uvs=aux.verts_uvs[None, ...],\n sampling_mode=sampling_mode,\n # align_corners=False\n )\n\n return new_mesh\n \n def repeat_meshes_as_batch(self, mesh, batch_size):\n return join_meshes_as_batch(\n [mesh for _ in range(batch_size)],\n include_textures=True\n )\n\n def _init_mesh(self):\n cache_dir = self.config.log_dir\n\n self.mesh_dict = init_multiple_meshes_as_scene(\n json.load(open(self.config.scene_config_path)), \n str(cache_dir), \n self.device, \n subdivide_factor=self.config.subdivide_factor,\n return_dict=True\n )\n\n self.mesh, self.texture = self._init_texture(self.mesh_dict)\n\n if self.config.use_background:\n self.background_mesh_dict = init_background(\n self.config.background,\n self.mesh.get_bounding_boxes().cpu().numpy()[0],\n str(cache_dir),\n self.device,\n return_dict=True\n )\n\n self.background_mesh, self.background_texture = self._init_texture(self.background_mesh_dict)\n\n def _init_texture(self, mesh_dict):\n texture = torch.randn((\n 1, \n self.config.latent_texture_size, \n self.config.latent_texture_size, \n self.config.latent_channels\n ), requires_grad=True, device=self.device)\n\n mesh = self.apply_texture_to_mesh(\n mesh_dict[\"mesh\"],\n mesh_dict[\"faces\"],\n mesh_dict[\"aux\"],\n texture\n )\n\n if self.config.texture_type == \"hashgrid\":\n texture = HashGrid(\n 2,\n self.config.hashgrid_config.otype,\n self.config.hashgrid_config.n_levels,\n self.config.hashgrid_config.n_features_per_level,\n self.config.hashgrid_config.log2_hashmap_size,\n self.config.hashgrid_config.base_resolution,\n self.config.hashgrid_config.max_resolution,\n torch.float16 if self.config.hashgrid_config.dtype == \"half\" else torch.float32 # full precision to avoid NaN\n )\n \n elif self.config.texture_type == \"hashgrid_mlp\":\n texture = HashGridMLP(\n 2,\n self.config.hashgrid_config,\n self.config.mlp_config\n )\n\n else:\n texture = torch.randn((\n 1, \n self.config.latent_texture_size, \n self.config.latent_texture_size, \n self.config.latent_channels\n ), requires_grad=True, device=self.device)\n\n mesh = self.apply_texture_to_mesh(\n mesh_dict[\"mesh\"],\n mesh_dict[\"faces\"],\n mesh_dict[\"aux\"],\n texture\n )\n\n return mesh, texture\n \n def sort_rand_gpu(self, pop_size, num_samples):\n \"\"\"Generate a random torch.Tensor (GPU) and sort it to generate indices.\"\"\"\n return torch.argsort(torch.rand(pop_size, device=self.device))[:num_samples]\n\n def build_instance_map(self, studio, cache_dir):\n # build instance masks\n instance_map = build_instance_map(studio, \n cache_dir, cache_dir,\n self.config.dummy_texture_path, \n self.device, self.config.texture_size, self.config.render_size, 500).to(self.device)\n\n assert len(instance_map.shape) == 2, \"instance map should be in shape (W, H)\"\n\n # replace the dummy texture with the instance map\n self.mesh = self.apply_texture_to_mesh(\n self.mesh_dict[\"mesh\"],\n self.mesh_dict[\"faces\"],\n self.mesh_dict[\"aux\"],\n instance_map[None, :, :, None].repeat(1, 1, 1, 
3),\n \"nearest\"\n )\n \n self.instance_map = instance_map\n \n def sample_instance_anchors(self, cache_dir):\n cache_path = Path(cache_dir) / \"anchors.pth\"\n\n if cache_path.exists():\n print(\"=> loading instance anchors from {}...\".format(str(cache_path)))\n self.instance_anchors = torch.load(str(cache_path))\n self.num_instances = self.instance_anchors.shape[0]\n else:\n print(\"=> sampling instance anchors...\")\n instance_labels = torch.unique(self.instance_map)\n assert instance_labels.shape[0] > 1\n instance_labels = instance_labels[instance_labels != 0]\n\n instance_anchors = []\n for instance_id in instance_labels:\n instance_mask = self.instance_map == instance_id\n uv_coords = torch.nonzero(instance_mask) # NumInsTex, 2\n sampled_ids = self.sort_rand_gpu(uv_coords.shape[0], self.config.num_anchors)\n sampled_uv_coords = uv_coords[sampled_ids, :]\n instance_anchors.append(sampled_uv_coords)\n\n instance_anchors = torch.stack(instance_anchors) # M, NumAnchor, 2\n instance_anchors = instance_anchors.float() / self.config.texture_size\n\n assert instance_anchors.min() >= 0 and instance_anchors.max() <= 1\n\n print(\"=> saving anchors to {}\".format(str(cache_path)))\n torch.save(instance_anchors, str(cache_path))\n\n self.instance_anchors = instance_anchors\n self.num_instances = self.instance_anchors.shape[0]" }, { "identifier": "Studio", "path": "models/modules/studio.py", "snippet": "class Studio(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n # render function\n self.render_func = self._init_render_func()\n\n self._init_camera_settings()\n\n def _init_camera_settings(self):\n if self.config.use_sphere_cameras and not self.config.use_blenderproc_cameras: # use random cameras\n\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n \n dist_linspace = np.linspace(\n self.sphere_cameras.dist.min,\n self.sphere_cameras.dist.max,\n 1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,\n )\n elev_linspace = np.linspace(\n self.sphere_cameras.elev.min,\n self.sphere_cameras.elev.max,\n 1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,\n )\n azim_linspace = np.linspace(\n self.sphere_cameras.azim.min,\n self.sphere_cameras.azim.max,\n 1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,\n )\n fov_linspace = np.linspace(\n self.sphere_cameras.fov.min,\n self.sphere_cameras.fov.max,\n 1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,\n )\n at = np.array(self.sphere_cameras.at)\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n dist_list = combinations[:, 0].tolist()\n elev_list = combinations[:, 1].tolist()\n azim_list = combinations[:, 2].tolist()\n\n self.Rs, self.Ts = init_trajectory(dist_list, elev_list, azim_list, at)\n self.fov_list = combinations[:, 3].tolist()\n\n self.num_cameras = len(self.Rs)\n\n print(\"=> using {} spherical cameras for training\".format(self.num_cameras))\n\n elif not self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:\n\n poses = json.load(open(self.config.blenderproc_cameras))\n self.Rs, self.Ts = init_blenderproc_trajectory(poses, self.device)\n\n self.num_cameras = len(self.Rs)\n self.fov_list = [self.config.fov] * self.num_cameras\n\n 
print(\"=> using {} blenderproc cameras for training\".format(self.num_cameras))\n\n elif self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:\n\n # spherical cameras\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n \n dist_linspace = np.linspace(\n self.sphere_cameras.dist.min,\n self.sphere_cameras.dist.max,\n 1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,\n )\n elev_linspace = np.linspace(\n self.sphere_cameras.elev.min,\n self.sphere_cameras.elev.max,\n 1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,\n )\n azim_linspace = np.linspace(\n self.sphere_cameras.azim.min,\n self.sphere_cameras.azim.max,\n 1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,\n )\n fov_linspace = np.linspace(\n self.sphere_cameras.fov.min,\n self.sphere_cameras.fov.max,\n 1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,\n )\n at = np.array(self.sphere_cameras.at)\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n dist_list = combinations[:, 0].tolist()\n elev_list = combinations[:, 1].tolist()\n azim_list = combinations[:, 2].tolist()\n\n sphere_Rs, sphere_Ts = init_trajectory(dist_list, elev_list, azim_list, at)\n sphere_fov_list = combinations[:, 3].tolist()\n\n # blenderproc cameras\n poses = json.load(open(self.config.blenderproc_cameras))\n blenderproc_Rs, blenderproc_Ts = init_blenderproc_trajectory(poses, self.device)\n blenderproc_fov_list = [self.config.fov] * len(blenderproc_Rs)\n \n self.Rs = sphere_Rs + blenderproc_Rs\n self.Ts = sphere_Ts + blenderproc_Ts\n self.fov_list = sphere_fov_list + blenderproc_fov_list\n self.num_cameras = len(self.Rs)\n\n print(\"=> using {} spherical cameras and {} blenderproc cameras for training\".format(len(sphere_Rs), len(blenderproc_Rs)))\n\n # self.sphere_Rs = sphere_Rs\n # self.sphere_Ts = sphere_Ts\n # self.sphere_fov_list = sphere_fov_list\n # self.num_sphere_cameras = len(self.sphere_Rs)\n\n # self.Rs = sphere_Rs + blenderproc_Rs\n # self.Ts = sphere_Ts + blenderproc_Ts\n # self.fov_list = sphere_fov_list + blenderproc_fov_list\n # self.num_cameras = len(self.Rs)\n\n # print(\"=> using {} spherical cameras and {} blenderproc cameras for training\".format(len(sphere_Rs), len(blenderproc_Rs)))\n # print(\"=> using {} cameras before annealing and {} cameras afterwards\".format(self.num_sphere_cameras, self.num_cameras))\n\n else: # use fixed cameras\n raise NotImplementedError\n\n # for inference \n # FIXME only support spherical cameras for now\n # spherical cameras\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n\n dist_linspace = [self.sphere_cameras.dist.min] # always take the min dist from spherical cameras\n elev_linspace = [self.config.elev]\n azim_linspace = np.linspace(\n self.config.azim[0],\n self.config.azim[1],\n self.config.log_latents_views,\n )\n fov_linspace = [self.config.fov]\n at = np.array(self.sphere_cameras.at) # always take the cameras center from spherical cameras\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n self.inference_dist_list = combinations[:, 0].tolist()\n self.inference_elev_list = combinations[:, 1].tolist()\n self.inference_azim_list = combinations[:, 2].tolist()\n self.inference_fov_list = 
combinations[:, 3].tolist()\n self.inference_at = at\n\n self.num_inference_cameras = len(self.inference_dist_list)\n\n print(\"=> using {} cameras for training, {} cameras for inference.\".format(self.num_cameras, self.num_inference_cameras))\n\n def _init_render_func(self):\n if self.config.render_func_type == \"mlp\":\n if self.config.texture_type == \"hashgrid\":\n in_channels = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level\n elif self.config.texture_type == \"hashgrid_mlp\":\n in_channels = self.config.mlp_config.out_channels\n else:\n in_channels = self.config.latent_channels\n\n render_func = MLP(\n in_channels,\n self.config.render_channels,\n self.config.view_embedding_hidden_dim,\n self.config.num_view_embedding_layers,\n dtype=torch.float32\n ).to(self.device)\n \n elif self.config.render_func_type == \"none\":\n render_func = nn.Identity()\n\n else:\n raise NotImplementedError(\"not supported render function type: {}\".format(self.config.render_func_type))\n\n return render_func\n \n def init_anchor_func(self, num_instances):\n if self.config.texture_type == \"hashgrid\":\n anchor_dim = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level\n elif self.config.texture_type == \"hashgrid_mlp\":\n anchor_dim = self.config.mlp_config.out_channels\n else:\n anchor_dim = self.config.latent_channels\n\n anchor_func = AnchorTransformer(self.config, self.device, anchor_dim=anchor_dim, num_instances=num_instances).to(self.device)\n\n self.anchor_func = anchor_func\n\n def set_cameras(self, R, T, fov, image_size):\n return init_camera_R_T(R, T, image_size, self.device, fov)\n \n def set_renderer(self, camera, image_size):\n return init_renderer(camera,\n shader=init_flat_texel_shader(\n camera=camera,\n device=self.device\n ),\n image_size=image_size, \n faces_per_pixel=self.config.faces_per_pixel\n )\n\n def _sample_one_camera(self, step, random_cameras=False, inference=False):\n R, T, fov, idx = None, None, None, None\n if inference:\n idx = step % self.num_inference_cameras\n dist = self.inference_dist_list[idx]\n elev = self.inference_elev_list[idx]\n azim = self.inference_azim_list[idx]\n fov = self.inference_fov_list[idx]\n at = self.inference_at\n R, T = look_at_view_transform(dist, elev, azim, at=at)\n else:\n\n if random_cameras:\n idx = random.choice(range(self.num_cameras))\n else:\n idx = step % self.num_cameras\n\n R, T, fov = self.Rs[idx], self.Ts[idx], self.fov_list[idx]\n\n # if self.config.use_sphere_cameras and self.config.use_blenderproc_cameras and step < self.config.num_anneal_steps:\n \n # if random_cameras:\n # idx = random.choice(range(self.num_sphere_cameras))\n # else:\n # idx = step % self.num_sphere_cameras\n\n # R, T, fov = self.sphere_Rs[idx], self.sphere_Ts[idx], self.sphere_fov_list[idx]\n\n # else:\n\n # if random_cameras:\n # idx = random.choice(range(self.num_cameras))\n # else:\n # idx = step % self.num_cameras\n\n # R, T, fov = self.Rs[idx], self.Ts[idx], self.fov_list[idx]\n\n return R, T, fov, idx\n \n def sample_cameras(self, step, num_samples, random_cameras=False, inference=False):\n if num_samples == 1:\n return self._sample_one_camera(step, random_cameras, inference)\n else:\n Rs, Ts, fovs, ids = [], [], [], []\n cur_step = step % self.num_cameras\n \n if random_cameras:\n pool = [e for e in range(self.num_cameras) if e != cur_step]\n next_steps = random.sample(pool, k=num_samples-1)\n else:\n next_steps = [(cur_step+s+1) % self.num_cameras for s in 
range(num_samples-1)]\n\n steps = [cur_step] + next_steps\n for s in steps:\n R, T, fov, idx = self._sample_one_camera(s)\n Rs.append(R)\n Ts.append(T)\n fovs.append(fov)\n ids.append(idx)\n\n Rs = torch.cat(Rs, dim=0)\n Ts = torch.cat(Ts, dim=0)\n\n return Rs, Ts, fovs, ids\n\n def get_uv_coordinates(self, mesh, fragments):\n xyzs = mesh.verts_padded() # (N, V, 3)\n faces = mesh.faces_padded() # (N, F, 3)\n\n faces_uvs = mesh.textures.faces_uvs_padded()\n verts_uvs = mesh.textures.verts_uvs_padded()\n\n # NOTE Meshes are replicated in batch. Taking the first one is enough.\n batch_size, _, _ = xyzs.shape\n xyzs, faces, faces_uvs, verts_uvs = xyzs[0], faces[0], faces_uvs[0], verts_uvs[0]\n faces_coords = verts_uvs[faces_uvs] # (F, 3, 2)\n\n # replicate the coordinates as batch\n faces_coords = faces_coords.repeat(batch_size, 1, 1)\n\n invalid_mask = fragments.pix_to_face == -1\n target_coords = interpolate_face_attributes(\n fragments.pix_to_face, fragments.bary_coords, faces_coords\n ) # (N, H, W, 1, 3)\n _, H, W, K, _ = target_coords.shape\n target_coords[invalid_mask] = 0\n assert K == 1 # pixel_per_faces should be 1\n target_coords = target_coords.squeeze(3) # (N, H, W, 2)\n\n return target_coords\n\n def get_relative_depth_map(self, zbuf, pad_value=10):\n absolute_depth = zbuf[..., 0] # B, H, W\n no_depth = -1\n\n depth_min, depth_max = absolute_depth[absolute_depth != no_depth].min(), absolute_depth[absolute_depth != no_depth].max()\n target_min, target_max = 50, 255\n\n depth_value = absolute_depth[absolute_depth != no_depth]\n depth_value = depth_max - depth_value # reverse values\n\n depth_value /= (depth_max - depth_min)\n depth_value = depth_value * (target_max - target_min) + target_min\n\n relative_depth = absolute_depth.clone()\n relative_depth[absolute_depth != no_depth] = depth_value\n relative_depth[absolute_depth == no_depth] = pad_value # not completely black\n\n return absolute_depth, relative_depth\n\n def query_texture(self, coords, texture, encode=True):\n assert \"hashgrid\" in self.config.texture_type\n\n if encode:\n B, H, W, C = coords.shape\n inputs = coords.reshape(-1, C)\n outputs = texture(inputs)\n outputs = outputs.reshape(B, H, W, -1)\n else:\n outputs = coords\n\n return outputs.to(torch.float32)\n \n def query_anchor_features(self, anchors, texture, features, instances_in_view, is_background=False):\n if is_background:\n anchor_features = features\n else:\n # with torch.no_grad():\n # anchors = self.query_texture(anchors.unsqueeze(2), texture).squeeze(2) # M, NumAnchor, C\n # if self.config.detach_anchors:\n # anchors = anchors.detach() # the original UV features won't be updated\n\n anchors = self.query_texture(anchors.unsqueeze(2), texture).squeeze(2) # M, NumAnchor, C\n if self.config.detach_anchors:\n anchors = anchors.detach() # the original UV features won't be updated\n \n anchor_features = self.anchor_func(anchors, features, instances_in_view) # M, C\n\n return anchor_features\n\n def render_features(self, renderer, mesh, texture, is_direct=False, is_background=False, anchors=None):\n # if enable_anchor_embedding is True\n # latents will be the rendered instance map\n latents, fragments = renderer(mesh) # image: (N, H, W, C)\n\n if is_direct:\n features = latents\n else:\n uv_coords = self.get_uv_coordinates(mesh, fragments)\n features = self.query_texture(uv_coords, texture)\n\n if self.config.enable_anchor_embedding:\n features = self.query_anchor_features(anchors, texture, features, latents[..., 0], is_background)\n\n features = 
self.render_func(features)\n\n absolute_depth, relative_depth = self.get_relative_depth_map(fragments.zbuf)\n\n return features, fragments, absolute_depth, relative_depth # (N, H, W, C)\n \n def render(self, renderer, mesh, texture, background=None, background_texture=None, anchors=None, is_direct=False):\n features, fragments, absolute_depth, relative_depth = self.render_features(renderer, mesh, texture, is_direct=is_direct, is_background=False, anchors=anchors)\n\n # blend background\n # NOTE there's no need to render background if no views see the background\n if background is not None and -1 in fragments.zbuf:\n background_features, background_fragments, _, _ = self.render_features(renderer, background, background_texture, is_direct=is_direct, is_background=True, anchors=None)\n\n # blend rendering\n background_mask = fragments.zbuf == -1\n background_mask = background_mask.repeat(1, 1, 1, background_features.shape[-1])\n features[background_mask] = background_features[background_mask]\n\n # blend depth\n background_mask = fragments.zbuf == -1\n blend_zbuf = fragments.zbuf\n blend_zbuf[background_mask] = background_fragments.zbuf[background_mask]\n absolute_depth, relative_depth = self.get_relative_depth_map(blend_zbuf)\n\n return features, absolute_depth, relative_depth" }, { "identifier": "Guidance", "path": "models/modules/guidance.py", "snippet": "class Guidance(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.prompt = config.prompt + \", \" + config.a_prompt if config.a_prompt else config.prompt\n self.n_prompt = config.n_prompt\n \n self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32\n\n self._init_guidance()\n\n def _init_guidance(self):\n self._init_backbone()\n self._init_t_schedule()\n\n def _init_backbone(self):\n if self.config.diffusion_type == \"t2i\":\n from diffusers import StableDiffusionPipeline as DiffusionPipeline\n checkpoint_name = \"stabilityai/stable-diffusion-2-1-base\"\n # diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n # checkpoint_name = \"runwayml/stable-diffusion-v1-5\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n elif self.config.diffusion_type == \"d2i\":\n from diffusers import StableDiffusionDepth2ImgPipeline as DiffusionPipeline\n checkpoint_name = \"stabilityai/stable-diffusion-2-depth\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n elif self.config.diffusion_type == \"d2i_controlnet\":\n from diffusers import StableDiffusionControlNetPipeline as DiffusionPipeline\n controlnet_name = \"lllyasviel/control_v11f1p_sd15_depth\"\n controlnet = ControlNetModel.from_pretrained(controlnet_name)\n checkpoint_name = \"runwayml/stable-diffusion-v1-5\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name, controlnet=controlnet).to(self.device)\n\n # freeze controlnet\n self.controlnet = diffusion_model.controlnet.to(self.weights_dtype)\n self.controlnet.requires_grad_(False)\n else:\n raise ValueError(\"invalid diffusion type.\")\n\n if self.config.enable_memory_efficient_attention:\n print(\"=> Enable memory efficient attention.\")\n diffusion_model.enable_xformers_memory_efficient_attention()\n\n # pretrained diffusion model\n self.tokenizer = diffusion_model.tokenizer\n self.text_encoder = diffusion_model.text_encoder\n self.vae = diffusion_model.vae\n self.unet = 
diffusion_model.unet.to(self.weights_dtype)\n\n self.text_encoder.requires_grad_(False)\n self.vae.requires_grad_(False)\n self.unet.requires_grad_(False)\n\n # use DDIMScheduler by default\n self.scheduler = DDIMScheduler.from_pretrained(checkpoint_name, subfolder=\"scheduler\")\n self.scheduler.betas = self.scheduler.betas.to(self.device)\n self.scheduler.alphas = self.scheduler.alphas.to(self.device)\n self.scheduler.alphas_cumprod = self.scheduler.alphas_cumprod.to(self.device)\n\n self.num_train_timesteps = len(self.scheduler.betas)\n\n if self.config.generation_mode == \"t2i\":\n self.scheduler.set_timesteps(self.config.num_steps)\n raise NotImplementedError\n else:\n self.scheduler.set_timesteps(self.num_train_timesteps)\n\n # phi\n # unet_phi is the same instance as unet that has been modified in-place\n # unet_phi not grad -> only train unet_phi_layers\n if self.config.loss_type == \"vsd\":\n self.unet_phi, self.unet_phi_layers = extract_lora_diffusers(self.unet, self.device)\n\n # load pretrained lora\n if len(self.config.load_lora_weights) > 0 and os.path.exists(self.config.load_lora_weights):\n print(\"=> loading pretrained LoRA weights from: {}\".format(self.config.load_lora_weights))\n self.unet_phi.load_attn_procs(self.config.load_lora_weights)\n\n # loss weights\n self.loss_weights = self._init_loss_weights(self.scheduler.betas)\n\n self.avg_loss_vsd = []\n self.avg_loss_phi = []\n self.avg_loss_rgb = []\n\n if self.config.loss_type == \"l2\": \n self.label = torchvision.io.read_image(self.config.label_path).float().to(self.device) / 255.\n self.label = self.label * 2 - 1 # -1 to 1\n self.label = self.label.unsqueeze(0)\n\n max_memory_allocated = torch.cuda.max_memory_allocated()\n print(f\"=> Maximum GPU memory allocated by PyTorch: {max_memory_allocated / 1024**3:.2f} GB\")\n\n def _init_loss_weights(self, betas): \n num_train_timesteps = len(betas)\n betas = torch.tensor(betas).to(torch.float32) if not torch.is_tensor(betas) else betas\n alphas = 1.0 - betas\n alphas_cumprod = torch.cumprod(alphas, axis=0)\n sqrt_1m_alphas_cumprod = torch.sqrt(1. 
- alphas_cumprod)\n \n weights = []\n for i in range(num_train_timesteps):\n weights.append(sqrt_1m_alphas_cumprod[i]**2)\n \n return weights\n \n def _init_t_schedule(self, t_start=0.02, t_end=0.98):\n # Create a list of time steps from 0 to num_train_timesteps\n ts = list(range(self.num_train_timesteps))\n # set ts to U[0.02,0.98] as least\n t_start = int(t_start * self.num_train_timesteps)\n t_end = int(t_end * self.num_train_timesteps)\n ts = ts[t_start:t_end]\n\n # If the scheduling strategy is \"random\", choose args.num_steps random time steps without replacement\n if self.config.t_schedule == \"random\":\n chosen_ts = np.random.choice(ts, self.config.num_steps, replace=True)\n\n # If the scheduling strategy is \"t_stages\", the total number of time steps are divided into several stages.\n # In each stage, a decreasing portion of the total time steps is considered for selection.\n # For each stage, time steps are randomly selected with replacement from the respective portion.\n # The final list of chosen time steps is a concatenation of the time steps selected in all stages.\n # Note: The total number of time steps should be evenly divisible by the number of stages.\n elif \"t_stages\" in self.config.t_schedule:\n # Parse the number of stages from the scheduling strategy string\n num_stages = int(self.config.t_schedule[8:]) if len(self.config.t_schedule[8:]) > 0 else 2\n chosen_ts = []\n for i in range(num_stages):\n # Define the portion of ts to be considered in this stage\n portion = ts[:int((num_stages-i)*len(ts)//num_stages)]\n selected_ts = np.random.choice(portion, self.config.num_steps//num_stages, replace=True).tolist()\n chosen_ts += selected_ts\n \n elif \"anneal\" in self.config.t_schedule:\n print(\"=> time step annealing after {} steps\".format(self.config.num_anneal_steps))\n\n ts_before_anneal = np.random.choice(ts, self.config.num_anneal_steps, replace=True).tolist()\n ts_after_anneal = np.random.choice(ts[:len(ts)//2], self.config.num_steps-self.config.num_anneal_steps, replace=True).tolist()\n chosen_ts = ts_before_anneal + ts_after_anneal\n \n else:\n raise ValueError(f\"Unknown scheduling strategy: {self.config.t_schedule}\")\n\n # Return the list of chosen time steps\n self.chosen_ts = chosen_ts\n\n def init_text_embeddings(self, batch_size):\n ### get text embedding\n text_input = self.tokenizer(\n [self.prompt], \n padding=\"max_length\", \n max_length=self.tokenizer.model_max_length, \n truncation=True, \n return_tensors=\"pt\"\n ).input_ids.to(self.device)\n\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input)[0].repeat(batch_size, 1, 1)\n\n max_length = text_input.shape[-1]\n uncond_input = self.tokenizer(\n [self.n_prompt], \n padding=\"max_length\", \n max_length=max_length, \n return_tensors=\"pt\"\n ).input_ids.to(self.device)\n\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input)[0].repeat(batch_size, 1, 1)\n\n self.text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n def prepare_depth_map(self, depth_map):\n assert len(depth_map.shape) == 4\n if \"controlnet\" in self.config.diffusion_type:\n depth_map = depth_map.repeat(1, 3, 1, 1).float()\n depth_map = F.interpolate(depth_map, (self.config.render_size, self.config.render_size), mode=\"bilinear\", align_corners=False)\n \n # expected range [0,1]\n depth_map /= 255.0\n else:\n # down-sample and normalize\n depth_map = F.interpolate(depth_map, (self.config.latent_size, self.config.latent_size), mode=\"bilinear\", align_corners=False)\n\n # 
expected range [-1,1]\n depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0\n # depth_map /= 255.0\n # depth_map = 2.0 * depth_map - 1.0\n\n depth_map = depth_map.to(torch.float32)\n\n return depth_map\n \n @torch.no_grad()\n def decode_latent_texture(self, inputs, use_patches=False):\n outputs = 1 / self.vae.config.scaling_factor * inputs\n\n if use_patches:\n assert self.config.latent_texture_size % self.config.decode_texture_size == 0\n batch_size = inputs.shape[0]\n num_iter_x = self.config.latent_texture_size // self.config.decode_texture_size\n num_iter_y = self.config.latent_texture_size // self.config.decode_texture_size\n patch_stride = self.config.decode_texture_size\n decoded_stride = self.config.decode_texture_size * 8\n decoded_size = self.config.latent_texture_size * 8\n decoded_texture = torch.zeros(batch_size, 3, decoded_size, decoded_size).to(self.device)\n\n for x in range(num_iter_x):\n for y in range(num_iter_y):\n patch = outputs[:, :, x*patch_stride:(x+1)*patch_stride, y*patch_stride:(y+1)*patch_stride]\n patch = self.vae.decode(patch.contiguous()).sample # B, 3, H, W\n\n decoded_texture[:, :, x*decoded_stride:(x+1)*decoded_stride, y*decoded_stride:(y+1)*decoded_stride] = patch\n \n outputs = (decoded_texture / 2 + 0.5).clamp(0, 1)\n\n else:\n outputs = self.vae.decode(outputs.contiguous()).sample # B, 3, H, W\n outputs = (outputs / 2 + 0.5).clamp(0, 1)\n\n return outputs\n \n def encode_latent_texture(self, inputs, deterministic=False):\n inputs = inputs.clamp(-1, 1)\n \n h = self.vae.encoder(inputs)\n moments = self.vae.quant_conv(h)\n mean, logvar = torch.chunk(moments, 2, dim=1)\n std = torch.zeros_like(mean) if deterministic else torch.exp(0.5 * logvar)\n sample = mean + std * torch.randn_like(mean)\n \n return self.vae.config.scaling_factor * sample\n\n def normalize_latent_texture(self, inputs):\n outputs = (inputs / 2 + 0.5).clamp(0, 1)\n\n return outputs\n \n def prepare_one_latent(self, latents, t):\n noise = torch.randn_like(latents).to(self.device)\n noisy_latents = self.scheduler.add_noise(latents, noise, t)\n clean_latents = self.scheduler.step(noise, t, noisy_latents).pred_original_sample\n\n return noise, noisy_latents, clean_latents\n\n def prepare_latents(self, latents, t, batch_size):\n t = torch.tensor([t]).to(self.device)\n noise, noisy_latents, clean_latents = self.prepare_one_latent(latents, t)\n\n return t, noise, noisy_latents, clean_latents\n \n def predict_noise(self, unet, noisy_latents, t, cross_attention_kwargs, guidance_scale, control=None):\n down_block_res_samples, mid_block_res_sample = None, None\n\n if guidance_scale == 1:\n latent_model_input = noisy_latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n \n text_embeddings, _ = self.text_embeddings.chunk(2)\n\n if control is not None: \n if \"controlnet\" in self.config.diffusion_type:\n with torch.no_grad():\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input.to(self.weights_dtype),\n t,\n encoder_hidden_states=text_embeddings.to(self.weights_dtype),\n controlnet_cond=control.to(self.weights_dtype),\n conditioning_scale=1.0,\n guess_mode=False,\n return_dict=False,\n )\n\n down_block_res_samples = [e.to(self.weights_dtype) for e in down_block_res_samples]\n mid_block_res_sample = mid_block_res_sample.to(self.weights_dtype)\n else:\n latent_model_input = 
torch.cat([latent_model_input, control], dim=1)\n\n # if self.config.verbose_mode: start = time.time()\n noise_pred = unet(\n latent_model_input.to(self.weights_dtype), \n t, \n encoder_hidden_states=text_embeddings.to(self.weights_dtype), \n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample\n ).sample.to(torch.float32)\n # if self.config.verbose_mode: print(\"=> UNet forward: {}s\".format(time.time() - start))\n else:\n latent_model_input = torch.cat([noisy_latents] * 2)\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n \n if control is not None: \n if \"controlnet\" in self.config.diffusion_type:\n with torch.no_grad():\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input.to(self.weights_dtype),\n t,\n encoder_hidden_states=self.text_embeddings.to(self.weights_dtype),\n controlnet_cond=torch.cat([control]*2).to(self.weights_dtype),\n conditioning_scale=1.0,\n guess_mode=False,\n return_dict=False,\n )\n\n down_block_res_samples = [e.to(self.weights_dtype) for e in down_block_res_samples]\n mid_block_res_sample = mid_block_res_sample.to(self.weights_dtype)\n else:\n latent_model_input = torch.cat([latent_model_input, torch.cat([control]*2)], dim=1)\n\n # if self.config.verbose_mode: start = time.time()\n noise_pred = unet(\n latent_model_input.to(self.weights_dtype), \n t, \n encoder_hidden_states=self.text_embeddings.to(self.weights_dtype), \n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample\n ).sample.to(torch.float32)\n # if self.config.verbose_mode: print(\"=> UNet forward: {}s\".format(time.time() - start))\n\n # perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n return noise_pred\n\n def compute_sds_loss(self, latents, noisy_latents, noise, t, control=None):\n with torch.no_grad():\n noise_pred = self.predict_noise(\n self.unet, \n noisy_latents, \n t, \n cross_attention_kwargs={},\n guidance_scale=self.config.guidance_scale,\n control=control\n )\n\n grad = self.config.grad_scale * (noise_pred - noise)\n grad = torch.nan_to_num(grad)\n\n grad *= self.loss_weights[int(t)]\n \n # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad\n target = (latents - grad).detach()\n loss = 0.5 * F.mse_loss(latents, target, reduction=\"mean\")\n\n return loss\n \n def compute_vsd_loss(self, latents, noisy_latents, noise, t, cross_attention_kwargs, control=None): \n with torch.no_grad():\n # predict the noise residual with unet\n # set cross_attention_kwargs={\"scale\": 0} to use the pre-trained model\n if self.config.verbose_mode: start = time.time()\n noise_pred = self.predict_noise(\n self.unet, \n noisy_latents, \n t, \n cross_attention_kwargs={\"scale\": 0},\n guidance_scale=self.config.guidance_scale,\n control=control\n )\n if self.config.verbose_mode: print(\"=> VSD pretrained forward: {}s\".format(time.time() - start))\n\n if self.config.verbose_mode: start = time.time()\n noise_pred_phi = self.predict_noise(\n self.unet_phi, \n noisy_latents, \n t, \n cross_attention_kwargs=cross_attention_kwargs,\n guidance_scale=self.config.guidance_scale_phi,\n control=control\n )\n if self.config.verbose_mode: print(\"=> VSD lora forward: {}s\".format(time.time() - start))\n\n grad = 
self.config.grad_scale * (noise_pred - noise_pred_phi.detach())\n grad = torch.nan_to_num(grad)\n\n grad *= self.loss_weights[int(t)]\n \n # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad\n target = (latents - grad).detach()\n loss = 0.5 * F.mse_loss(latents, target, reduction=\"none\")\n\n return loss, loss.mean()\n \n def compute_vsd_phi_loss(self, noisy_latents, clean_latents, noise, t, cross_attention_kwargs, control=None):\n if self.config.verbose_mode: start = time.time()\n noise_pred_phi = self.predict_noise(\n self.unet_phi, \n noisy_latents, \n t, \n cross_attention_kwargs=cross_attention_kwargs,\n guidance_scale=self.config.guidance_scale_phi,\n control=control\n )\n\n if self.config.verbose_mode: print(\"=> phi lora forward: {}s\".format(time.time() - start))\n\n target = noise\n\n loss = self.config.grad_scale * F.mse_loss(noise_pred_phi, target, reduction=\"none\")\n\n return loss, loss.mean()" } ]
import random
import wandb
import json
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
import pytorch_lightning as pl
import matplotlib.pyplot as plt
import sys
import open_clip
from torch.optim import Adam, AdamW
from torch.optim.lr_scheduler import LinearLR
from omegaconf import OmegaConf
from tqdm import tqdm
from omegaconf import OmegaConf
from PIL import Image
from copy import deepcopy
from pathlib import Path
from pytorch3d.io import (
    load_obj,
    load_objs_as_meshes
)
from pytorch3d.renderer import TexturesUV
from pytorch3d.ops import interpolate_face_attributes
from models.modules import TextureMesh, Studio, Guidance
10837
# mat
# customized
sys.path.append("./lib")


class TexturePipeline(nn.Module):
    def __init__(self,
        config,
        stamp,
        device
    ):
        super().__init__()

        self.config = config
        self.stamp = stamp

        self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt
        self.n_prompt = config.n_prompt

        self.device = device

        self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32
        print("=> Use precision: {}".format(self.weights_dtype))

        pl.seed_everything(self.config.seed)

    """call this after to(device)"""
    def configure(self, inference_mode=False):
        if not inference_mode:
            self.log_name = "_".join(self.config.prompt.split(' '))
            self.log_stamp = self.stamp
            self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp)

            # override config
            self.config.log_name = self.log_name
            self.config.log_stamp = self.log_stamp
            self.config.log_dir = self.log_dir

        # 3D assets
        self._init_mesh()

        # studio
        self._init_studio()

        # instances
        self._init_anchors()

        if not inference_mode:
            # diffusion
            self._init_guidance()

            # optimization
            self._configure_optimizers()

            self._init_logger()

        if self.config.enable_clip_benchmark:
            self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
            self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')

    def _init_studio(self):
        self.studio = Studio(self.config, self.device)

    def _init_mesh(self):
# mat
# customized
sys.path.append("./lib")


class TexturePipeline(nn.Module):
    def __init__(self,
        config,
        stamp,
        device
    ):
        super().__init__()

        self.config = config
        self.stamp = stamp

        self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt
        self.n_prompt = config.n_prompt

        self.device = device

        self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32
        print("=> Use precision: {}".format(self.weights_dtype))

        pl.seed_everything(self.config.seed)

    """call this after to(device)"""
    def configure(self, inference_mode=False):
        if not inference_mode:
            self.log_name = "_".join(self.config.prompt.split(' '))
            self.log_stamp = self.stamp
            self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp)

            # override config
            self.config.log_name = self.log_name
            self.config.log_stamp = self.log_stamp
            self.config.log_dir = self.log_dir

        # 3D assets
        self._init_mesh()

        # studio
        self._init_studio()

        # instances
        self._init_anchors()

        if not inference_mode:
            # diffusion
            self._init_guidance()

            # optimization
            self._configure_optimizers()

            self._init_logger()

        if self.config.enable_clip_benchmark:
            self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
            self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')

    def _init_studio(self):
        self.studio = Studio(self.config, self.device)

    def _init_mesh(self):
self.texture_mesh = TextureMesh(self.config, self.device)
0
2023-11-28 15:38:40+00:00
16k
HyeonHo99/Video-Motion-Customization
showone/models/unet_3d_condition.py
[ { "identifier": "TransformerTemporalModel", "path": "showone/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to use in feed-forward.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlock` attention should contain a bias parameter.\n double_self_attention (`bool`, *optional*):\n Configure if each `TransformerBlock` should contain two self-attention layers.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states,\n encoder_hidden_states=None,\n timestep=None,\n class_labels=None,\n num_frames=1,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`TransformerTemporal`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.long`, *optional*):\n Used to indicate denoising step. 
Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is\n returned, otherwise a `tuple` where the first element is the sample tensor.\n \"\"\"\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, channel, num_frames)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" }, { "identifier": "CrossAttnDownBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n attentions = []\n temp_attentions = []\n temp_convs = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n 
cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n output_states = ()\n\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, **ckpt_kwargs,\n ).sample\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n 
transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n num_frames: int = 1,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), 
hidden_states, temb, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "DownBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, num_frames=1):\n output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = 
output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "UNetMidBlock3DCrossAttn", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=1, #todo: transformer_layers_per_block?\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n 
encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UNetMidBlock3DSimpleCrossAttn", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DSimpleCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attention_head_dim=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n skip_time_act=False,\n only_cross_attention=False,\n cross_attention_norm=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n\n self.attention_head_dim = attention_head_dim\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n self.num_heads = in_channels // self.attention_head_dim\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n processor = (\n AttnAddedKVProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") else AttnAddedKVProcessor()\n )\n\n attentions.append(\n Attention(\n query_dim=in_channels,\n cross_attention_dim=in_channels,\n heads=self.num_heads,\n dim_head=self.attention_head_dim,\n added_kv_proj_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n bias=True,\n upcast_softmax=True,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n processor=processor,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n self.attention_head_dim,\n in_channels // self.attention_head_dim,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n 
):\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n\n if attention_mask is None:\n # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.\n mask = None if encoder_hidden_states is None else encoder_attention_mask\n else:\n # when attention_mask is defined: we don't even check for encoder_attention_mask.\n # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.\n # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.\n # then we can simplify this whole if/else block to:\n # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask\n mask = attention_mask\n\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=mask,\n **cross_attention_kwargs,\n )\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UpBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "showone/models/unet_3d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock3D\")\n return SimpleCrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == 
\"ResnetDownsampleBlock3D\":\n return ResnetDownsampleBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "showone/models/unet_3d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"SimpleCrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock3D\")\n return SimpleCrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == 
\"ResnetUpsampleBlock3D\":\n return ResnetUpsampleBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")" } ]
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin from .transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UNetMidBlock3DSimpleCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from diffusers.utils import WEIGHTS_NAME import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import os, json
13,790
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn":
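The down-block construction loop at the end of the cropped prefix above derives each stage's channel widths, and whether a downsampler is appended, purely from block_out_channels. A tiny stand-alone trace of that bookkeeping with the default configuration (plain Python, no diffusers needed; for illustration only):

block_out_channels = (320, 640, 1280, 1280)
down_block_types = (
    "CrossAttnDownBlock3D",
    "CrossAttnDownBlock3D",
    "CrossAttnDownBlock3D",
    "DownBlock3D",
)

output_channel = block_out_channels[0]
for i, block_type in enumerate(down_block_types):
    input_channel = output_channel
    output_channel = block_out_channels[i]
    is_final_block = i == len(block_out_channels) - 1
    # every stage except the last halves the spatial resolution via its downsampler
    print(f"{block_type}: {input_channel} -> {output_channel} channels, add_downsample={not is_final_block}")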
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # Copyright 2023 The ModelScope Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from diffusers.models.transformer_temporal import TransformerTemporalModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, it will skip the normalization and activation layers in post-processing norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: Optional[str] = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transfromer_in_opt: bool =False, ): super().__init__() self.sample_size = sample_size self.transformer_in_opt = transfromer_in_opt if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. 
`block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if self.transformer_in_opt: self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=64, in_channels=block_out_channels[0], num_layers=1, ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn":
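In the full source above, the "positional" time-embedding branch chains two diffusers helpers: Timesteps produces sinusoidal features of width block_out_channels[0], and TimestepEmbedding projects them to time_embed_dim = 4 * block_out_channels[0]. A short sketch of that path in isolation, assuming a diffusers release in which these helpers keep the signatures implied by the imports above:

import torch
from diffusers.models.embeddings import TimestepEmbedding, Timesteps

block_out_channels = (320, 640, 1280, 1280)
time_embed_dim = block_out_channels[0] * 4                # 1280 with the default config
time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedding = TimestepEmbedding(block_out_channels[0], time_embed_dim, act_fn="silu")

timesteps = torch.tensor([10, 250])                       # one diffusion timestep per batch element
t_emb = time_proj(timesteps)                              # [2, 320] sinusoidal features
emb = time_embedding(t_emb.to(torch.float32))             # [2, 1280] vector handed to every block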
self.mid_block = UNetMidBlock3DCrossAttn(
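The gold next_line above opens the UNetMidBlock3DCrossAttn call. Going only by that block's __init__ signature in the context and the configuration names visible in the constructor, the keyword wiring plausibly continues along the lines below. This is a hedged reconstruction for orientation, not the repository's verbatim continuation, and it is shown as it would appear inside __init__ rather than as stand-alone code.

            self.mid_block = UNetMidBlock3DCrossAttn(
                in_channels=block_out_channels[-1],            # 1280 with the default config
                temb_channels=blocks_time_embed_dim,
                transformer_layers_per_block=transformer_layers_per_block[-1],
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                resnet_time_scale_shift=resnet_time_scale_shift,
                output_scale_factor=mid_block_scale_factor,
                cross_attention_dim=cross_attention_dim[-1],
                num_attention_heads=num_attention_heads[-1],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )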
4
2023-11-29 17:23:45+00:00
16k
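Taken together, the fields of this record read as a next-line completion task: the retrieved context snippets, the file's imports, and the cropped prefix form the input, and next_line is the target. One plausible way to assemble such a prompt from a record with this schema, assuming each context entry is a dict with identifier/path/snippet keys as shown above (the model call in the trailing comment is hypothetical):

def build_completion_prompt(record: dict) -> str:
    # retrieved cross-file snippets first, then the in-file imports, then the
    # cropped prefix; the model is expected to emit record["next_line"]
    context = "\n\n".join(
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in record["context"]
    )
    return f"{context}\n\n{record['import_statement']}\n\n{record['cropped_code']}"

# usage (hypothetical): completion = model.generate(build_completion_prompt(record))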
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n self.validate = validate\n self.gif = gif\n self.aspect = FLAGS.train_res[1] / FLAGS.train_res[0]\n self.fovy_range_min = np.deg2rad(FLAGS.fovy_range[0])\n self.fovy_range_max = np.deg2rad(FLAGS.fovy_range[1])\n self.elevation_range_min= np.deg2rad(FLAGS.elevation_range[0])\n self.elevation_range_max= np.deg2rad(FLAGS.elevation_range[1])\n self.angle_front = np.deg2rad(FLAGS.front_threshold)\n \n\n def _gif_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.display_res[1] / self.FLAGS.display_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 100) * np.pi * 2\n rotate_x = np.deg2rad(20)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(-rotate_x) @ util.rotate_y(ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n \n \n\n def _validate_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 4) * np.pi * 2\n rotate_x = np.random.uniform(-np.pi/4,np.pi/18)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(rotate_x) @ util.rotate_y( ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n\n def _train_scene(self, itr):\n fovy = np.random.uniform(self.fovy_range_min, self.fovy_range_max)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n if self.FLAGS.gpu_number == 8: # All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0,4]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1,5]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2,6]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3,7]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n elif self.FLAGS.gpu_number == 4: #All the results in the paper were generated using 8 3090 GPUs. 
We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n else:\n rotate_y = np.random.uniform(np.deg2rad(-180), np.deg2rad(180)) #All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n \n rotate_x = -np.random.uniform(self.elevation_range_min, self.elevation_range_max)\n # angle_front = np.deg2rad(45)\n prompt_index = get_view_direction(thetas= rotate_x, phis = rotate_y, front= self.angle_front)\n cam_radius = 3\n x = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n y = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n mv = util.translate(x, y, -cam_radius) @ (util.rotate_x(rotate_x) @ util.rotate_y(rotate_y))\n if ((itr+1)/self.FLAGS.batch) <=self.FLAGS.coarse_iter:\n rotate_y1 = np.random.uniform(0,np.pi*2) \n rotate_x1 = np.random.uniform(-np.pi,np.pi)\n normal_rotate = util.rotate_y_1(rotate_y1 )@ util.rotate_x_1(rotate_x1) \n else:\n normal_rotate = util.rotate_y_1(0)@util.rotate_x_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(rotate_y), torch.tensor([fovy])\n\n def __len__(self):\n if self.gif == True:\n return 100\n else:\n return 4 if self.validate else (self.FLAGS.iter + 1) * self.FLAGS.batch\n\n def __getitem__(self, itr):\n if self.gif:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._gif_scene(itr)\n elif self.validate:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._validate_scene(itr)\n else:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._train_scene(itr)\n\n return {\n 'mv' : mv,\n 'mvp' : mvp,\n 'campos' : campos,\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate': normal_rotate,\n 'prompt_index' : prompt_index,\n 'elev': elev,\n 'azim': azim,\n 'fov': fov\n }\n def collate(self, batch):\n iter_res, iter_spp = batch[0]['resolution'], batch[0]['spp']\n return {\n 'mv' : torch.cat(list([item['mv'] for item in batch]), dim=0),\n 'mvp' : torch.cat(list([item['mvp'] for item in batch]), dim=0),\n 'campos' : torch.cat(list([item['campos'] for item in batch]), dim=0),\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate' : torch.cat(list([item['normal_rotate'] for item in batch]), dim=0),\n # 'prompt_index' : torch.cat(list([item['prompt_index'] for item in batch]), dim=0),\n 'prompt_index' : np.array([item['prompt_index'] for item in batch], dtype=np.int32),\n 'elev' : np.array([item['elev'] for item in batch], dtype=np.float16),\n 'azim' : np.array([item['azim'] for item in batch], dtype=np.float16),\n 'fov' : torch.cat(list([item['fov'] for item in batch]), dim=0),\n }" }, { "identifier": "get_camera_params", "path": "dataset/dataset_mesh.py", "snippet": "def get_camera_params(resolution= 512, fov=45, 
elev_angle=-20, azim_angle=0):\n fovy = np.deg2rad(fov) \n elev = np.radians( elev_angle )\n azim = np.radians( azim_angle ) \n proj_mtx = util.perspective(fovy, resolution /resolution, 1, 50)\n mv = util.translate(0, 0, -3) @ (util.rotate_x(elev) @ util.rotate_y(azim))\n normal_rotate = util.rotate_y_1(-azim ) @ util.rotate_x_1(-elev) \n # nomral_rotate = util.rotate_y_1(0) @ util.rotate_x_1(0) \n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n bkgs = torch.ones(1, resolution, resolution, 3, dtype=torch.float32, device='cuda')\n return {\n 'mvp' : mvp[None, ...].cuda(),\n 'mv' : mv[None, ...].cuda(),\n 'campos' : campos[None, ...].cuda(),\n 'resolution' : [resolution, resolution], \n 'spp' : 1,\n 'background' : bkgs,\n 'normal_rotate' : normal_rotate[None,...].cuda(),\n 'elev_angle' : torch.tensor(elev_angle).cuda(),\n 'azim_angle' : torch.tensor(azim_angle).cuda(),\n 'fov' : torch.tensor(fovy).cuda(),\n }" }, { "identifier": "DMTetGeometry", "path": "geometry/dmtet_x_dreamer.py", "snippet": "class DMTetGeometry(torch.nn.Module):\n def __init__(self, grid_res, scale, FLAGS):\n super(DMTetGeometry, self).__init__()\n\n self.FLAGS = FLAGS\n self.grid_res = grid_res\n self.marching_tets = DMTet()\n \n tets = np.load('data/tets/{}_tets.npz'.format(self.grid_res))\n self.verts = torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * scale\n print(\"tet grid min/max\", torch.min(self.verts).item(), torch.max(self.verts).item())\n self.decoder = Decoder(multires=0 , AABB= self.getAABB(), mesh_scale= scale)\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.generate_edges()\n self.pos_encoder = CameraEncoder().to(self.verts.device)\n\n def generate_edges(self):\n with torch.no_grad():\n edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype = torch.long, device = \"cuda\")\n all_edges = self.indices[:,edges].reshape(-1,2) \n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n @torch.no_grad()\n def getAABB(self):\n return torch.min(self.verts, dim=0).values, torch.max(self.verts, dim=0).values\n\n def getMesh(self, material):\n pred= self.decoder(self.verts)\n \n self.sdf , self.deform = pred[:, 0], pred[:, 1:] \n v_deformed = self.verts + 1 / (self.grid_res ) * torch.tanh(self.deform)\n verts, faces = self.marching_tets(v_deformed, self.sdf, self.indices)\n \n imesh = mesh.Mesh(verts, faces, material=material)\n imesh = mesh.auto_normals(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None, if_normal=False, mode = 'geometry_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material) \n return render.render_mesh(glctx, \n opt_mesh, \n target['mvp'], \n target['campos'], \n lgt, \n target['resolution'], \n spp=target['spp'], \n msaa= True,\n background= target['background'],\n bsdf= bsdf,\n if_normal= if_normal,\n normal_rotate= target['normal_rotate'],\n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n \n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal= if_normal, mode = 
mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]]) # [B*2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z]) # [B * 2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n \n if iteration <=self.FLAGS.coarse_iter:\n t = torch.randint( guidance.min_step_early, guidance.max_step_early + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n pred_rgb_512 = buffers['shaded'][..., 0:4].permute(0, 3, 1, 2).contiguous() # [B, 4, 64, 64]\n latents = F.interpolate(pred_rgb_512, (64, 64), mode='bilinear', align_corners=False)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n \n else:\n t = torch.randint(guidance.min_step_late, guidance.max_step_late + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda')\n srgb = buffers['shaded'][...,0:3] #* buffers['shaded'][..., 3:4] # normal * mask\n # \n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [B, 3, 512, 512]\n latents = guidance.encode_imgs(pred_rgb_512)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n\n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings, index=indexs, came_posfeat=came_posfeat)\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred =noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond) # [B, 4, 64, 64]\n if iteration <= self.FLAGS.coarse_iter:\n w = (1 - guidance.alphas[t]) # [B]\n else:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w * (noise_pred - noise ) #*w1\n grad = torch.nan_to_num(grad)\n \n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n\n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), 
(32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask_sizes[i], mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "DLMesh", "path": "geometry/dlmesh_x_dreamer.py", "snippet": "class DLMesh(torch.nn.Module):\n def __init__(self, initial_guess, FLAGS):\n super(DLMesh, self).__init__()\n self.FLAGS = FLAGS\n self.initial_guess = initial_guess\n self.mesh = initial_guess.clone()\n self.pos_encoder = CameraEncoder().cuda()\n print(\"Base mesh has %d triangles and %d vertices.\" % (self.mesh.t_pos_idx.shape[0], self.mesh.v_pos.shape[0]))\n \n @torch.no_grad()\n def getAABB(self):\n return mesh.aabb(self.mesh)\n\n def getMesh(self, material):\n self.mesh.material = material\n\n imesh = mesh.Mesh(base=self.mesh)\n # Compute normals and tangent space\n imesh = mesh.auto_normals(imesh)\n imesh = mesh.compute_tangents(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None,if_normal=False, mode = 'appearance_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material)\n return render.render_mesh(glctx, \n opt_mesh,\n target['mvp'],\n target['campos'],\n lgt,\n target['resolution'], \n spp=target['spp'], \n msaa=True,\n background= target['background'] ,\n bsdf= bsdf,\n if_normal=if_normal,\n normal_rotate=target['normal_rotate'], \n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal = if_normal, mode = mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]])\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z])\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n\n if iteration <= self.FLAGS.coarse_iter:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_early, guidance.max_step_early+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n else:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 
2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_late, guidance.max_step_late+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n\n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [1, 3, H, W]\n latents = guidance.encode_imgs(pred_rgb_512)\n \n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states= text_embeddings, index=indexs, came_posfeat=came_posfeat)#.sample######################\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond)\n \n if guidance.sds_weight_strategy == 0:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 1:\n w = 1 / (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 2:\n if iteration <= self.FLAGS.coarse_iter:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n else:\n w = 1 / (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w* (noise_pred -noise) \n grad = torch.nan_to_num(grad)\n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n \n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), (32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask2.shape, mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "obj", "path": "render/obj.py", "snippet": "def _find_mat(materials, name):\ndef load_obj(filename, clear_ks=True, mtl_override=None):\ndef 
write_obj(folder, mesh, save_material=True):" }, { "identifier": "material", "path": "render/material.py", "snippet": "class Material(torch.nn.Module):\n def __init__(self, mat_dict):\n def __contains__(self, key):\n def __getitem__(self, key):\n def __setitem__(self, key, val):\n def __delitem__(self, key):\n def keys(self):\ndef load_mtl(fn, clear_ks=True):\ndef save_mtl(fn, material):\ndef _upscale_replicate(x, full_res):\ndef merge_materials(materials, texcoords, tfaces, mfaces):" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B = y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "mesh", "path": "render/mesh.py", "snippet": "class Mesh:\n def __init__(self, v_pos=None, t_pos_idx=None, v_nrm=None, t_nrm_idx=None, v_tex=None, t_tex_idx=None, v_tng=None, t_tng_idx=None, material=None, base=None):\n def copy_none(self, other):\n def clone(self):\ndef load_mesh(filename, mtl_override=None):\ndef aabb(mesh):\ndef compute_edges(attr_idx, 
return_inverse=False):\ndef compute_edge_to_face_mapping(attr_idx, return_inverse=False):\ndef unit_size(mesh):\ndef center_by_reference(base_mesh, ref_aabb, scale):\ndef auto_normals(imesh):\ndef compute_tangents(imesh):" }, { "identifier": "texture", "path": "render/texture.py", "snippet": "class texture2d_mip(torch.autograd.Function):\nclass Texture2D(torch.nn.Module):\n def forward(ctx, texture):\n def backward(ctx, dout):\n def __init__(self, init, min_max=None):\n def sample(self, texc, texc_deriv, filter_mode='linear-mipmap-linear'):\n def getRes(self):\n def getChannels(self):\n def getMips(self):\n def clamp_(self):\n def normalize_(self):\ndef create_trainable(init, res=None, auto_mipmaps=True, min_max=None):\ndef srgb_to_rgb(texture):\ndef rgb_to_srgb(texture):\ndef _load_mip2D(fn, lambda_fn=None, channels=None):\ndef load_texture2D(fn, lambda_fn=None, channels=None):\ndef _save_mip2D(fn, mip, mipidx, lambda_fn):\ndef save_texture2D(fn, tex, lambda_fn=None):" }, { "identifier": "mlptexture", "path": "render/mlptexture.py", "snippet": "class _MLP(torch.nn.Module):\nclass MLPTexture3D(torch.nn.Module):\n def __init__(self, cfg, loss_scale=1.0):\n def forward(self, x):\n def _init_weights(m):\n def __init__(self, AABB, channels = 3, internal_dims = 32, hidden = 1, min_max = None):\n def sample(self, texc):\n def clamp_(self):\n def cleanup(self):" }, { "identifier": "light", "path": "render/light.py", "snippet": "class cubemap_mip(torch.autograd.Function):\nclass EnvironmentLight(torch.nn.Module):\n def forward(ctx, cubemap):\n def backward(ctx, dout):\n def __init__(self, base):\n def xfm(self, mtx):\n def clone(self):\n def clamp_(self, min=None, max=None):\n def get_mip(self, roughness):\n def build_mips(self, cutoff=0.99):\n def regularizer(self):\n def shade(self, gb_pos, gb_normal, kd, ks, view_pos, specular=True):\ndef _load_env_hdr(fn, scale=1.0):\ndef load_env(fn, scale=1.0):\ndef save_env_map(fn, light):\ndef create_trainable_env_rnd(base_res, scale=0.5, bias=0.25):\n LIGHT_MIN_RES = 16\n MIN_ROUGHNESS = 0.08\n MAX_ROUGHNESS = 0.5" }, { "identifier": "render", "path": "render/render.py", "snippet": "def interpolate(attr, rast, attr_idx, rast_db=None):\ndef shade(\n gb_pos,\n gb_geometric_normal,\n gb_normal,\n gb_tangent,\n gb_texc,\n gb_texc_deriv,\n view_pos,\n lgt,\n material,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_layer(\n rast,\n rast_deriv,\n mesh,\n view_pos,\n lgt,\n resolution,\n spp,\n msaa,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_mesh(\n ctx,\n mesh,\n mtx_in,\n view_pos,\n lgt,\n resolution,\n spp = 1,\n num_layers = 1,\n msaa = False,\n background = None, \n bsdf = None,\n if_normal = False,\n normal_rotate = None,\n mode = 'geometry_modeling',\n if_flip_the_normal = False,\n if_use_bump = False\n ):\n def prepare_input_vector(x):\n def composite_buffer(key, layers, background, antialias):\ndef render_uv(ctx, mesh, resolution, mlp_texture):\ndef uv_padding(image, hole_mask, padding = 2, uv_padding_block = 4):\ndef render_uv1(ctx, mesh, resolution, mlp_texture, uv_padding_block):" }, { "identifier": "StableDiffusion", "path": "sd_cglora.py", "snippet": "class StableDiffusion(nn.Module):\n def __init__(self, \n device, \n mode='geometry', \n text= '', \n add_directional_text= False, \n batch = 1, \n guidance_weight = 100, \n sds_weight_strategy = 0,\n early_time_step_range = [0.02, 0.5],\n late_time_step_range = [0.02, 0.5]):\n 
super().__init__()\n\n self.device = device\n self.mode = mode\n self.text= text\n self.add_directional_text = add_directional_text\n self.batch = batch \n print(f'[INFO] loading stable diffusion...')\n model_key = \"stabilityai/stable-diffusion-2-1-base\"\n self.vae = AutoencoderKL.from_pretrained(model_key, subfolder=\"vae\",torch_dtype=torch.float16).to(self.device)\n self.tokenizer = CLIPTokenizer.from_pretrained(model_key, subfolder=\"tokenizer\",torch_dtype=torch.float16)\n self.text_encoder = CLIPTextModel.from_pretrained(model_key, subfolder=\"text_encoder\",torch_dtype=torch.float16).to(self.device)\n self.unet = UNet2DConditionModel.from_pretrained(model_key, subfolder=\"unet\",torch_dtype=torch.float16).to(self.device)\n if is_xformers_available():\n self.unet.enable_xformers_memory_efficient_attention()\n self.negative_text = ''\n if add_directional_text:\n self.text_z = []\n self.uncond_z = []\n self.index = []\n self.uncond_index = []\n for d in ['front', 'side', 'back', 'side']:\n text = f\"{self.text}, {d} view\"\n # text = f\"{d} view of {self.text}\"\n negative_text = f\"{self.negative_text}\"\n # if d == 'back': negative_text += \"face\"\n text_z, index = self.get_text_embeds([text], batch = 1)\n uncond_z, uncond_index =self.get_uncond_embeds([negative_text], batch = 1)\n self.text_z.append(text_z)\n self.uncond_z.append(uncond_z)\n self.index.append(index)\n self.uncond_index.append(uncond_index)\n self.text_z = torch.cat(self.text_z)\n self.uncond_z = torch.cat(self.uncond_z)\n self.index = torch.cat(self.index)\n self.uncond_index = torch.cat(self.uncond_index)\n else: \n self.text_z, self.index = self.get_text_embeds([self.text], batch = self.batch)\n self.uncond_z =self.get_uncond_embeds([self.negative_text], batch = self.batch)\n # del self.text_encoder\n self.scheduler = DPMSolverMultistepScheduler.from_pretrained(model_key, subfolder=\"scheduler\", torch_dtype=torch.float16)\n self.num_train_timesteps = self.scheduler.config.num_train_timesteps\n self.min_step_early = int(self.num_train_timesteps * early_time_step_range[0])\n self.max_step_early = int(self.num_train_timesteps * early_time_step_range[1])\n self.min_step_late = int(self.num_train_timesteps * late_time_step_range[0])\n self.max_step_late = int(self.num_train_timesteps * late_time_step_range[1])\n self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience\n self.guidance_weight = guidance_weight\n self.sds_weight_strategy = sds_weight_strategy\n print(f'[INFO] loaded stable diffusion!')\n\n for p in self.parameters():\n p.requires_grad_(False)\n self.unet_lora_params, self.names = inject_trainable_cglora(self.unet) # This will\n\n\n def get_text_embeds_global(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n \n global_embedding = text_embeddings[:,text_input['input_ids'].argmax(dim=-1),:].squeeze()\n \n return global_embedding\n\n\n def get_text_embeds(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n 
###################################################################\n index = text_input['input_ids'].argmax(dim=-1)\n #global_embedding = text_embeddings[:, index, :].squeeze()\n ##################################################################\n \n return text_embeddings, index\n \n def get_uncond_embeds(self, negative_prompt, batch):\n uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n \n if batch > 1:\n uncond_embeddings = uncond_embeddings.repeat(batch, 1, 1)\n ###################################################################\n index = uncond_input['input_ids'].argmax(dim=-1)\n # global_embedding = uncond_embeddings[:, index, :].squeeze()\n ##################################################################\n return uncond_embeddings,index\n\n def encode_imgs(self, imgs):\n # imgs: [B, 3, H, W]\n if self.mode == 'appearance_modeling':\n \n imgs = 2 * imgs - 1\n\n posterior = self.vae.encode(imgs).latent_dist\n latents = posterior.sample() * 0.18215\n\n return latents" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B 
= y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "Video", "path": "render/video.py", "snippet": "class Video():\n def __init__(self, path, name='video_log.mp4', mode='I', fps=30, codec='libx264', bitrate='16M') -> None:\n \n if path[-1] != \"/\":\n path += \"/\"\n \n self.writer = imageio.get_writer(path+name, mode=mode, fps=fps, codec=codec, bitrate=bitrate)\n \n def ready_image(self, image, write_video=True):\n # assuming channels last - as renderer returns it\n if len(image.shape) == 4: \n image = image.squeeze(0)[..., :3].detach().cpu().numpy()\n else:\n image = image[..., :3].detach().cpu().numpy()\n\n image = np.clip(np.rint(image*255.0), 0, 255).astype(np.uint8)\n\n if write_video:\n self.writer.append_data(image)\n\n return image\n\n def close(self):\n self.writer.close()" } ]
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
11,894
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh)
mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'])
5
2023-11-27 13:44:01+00:00
16k
zhenzhiwang/intercontrol
sample/global_joint_control.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = th.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = th.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = th.add(th.mul(data, std), mean)\n return output\n \n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. #- model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def global_joint_bfgs_optimize(self, x, model_kwargs=None):\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.humanml_to_global_joint(x)\n cond_joint = model_kwargs['y']['global_joint']\n mask = model_kwargs['y']['global_joint_mask']\n pred_joint = th.masked_select(pred_joint, mask.bool())\n cond_joint = th.masked_select(cond_joint, mask.bool())\n assert pred_joint.shape == cond_joint.shape, f\"pred_joint: {pred_joint.shape}, cond_joint: {cond_joint.shape}\"\n loss = self.mse_loss(pred_joint, cond_joint)\n return loss\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert pred_joint.shape[1] == 1\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n return pred_joint\n \n def global_joint_position_conditioning(self, x, model_kwargs=None):\n n_joints = 22 if x.shape[1] == 263 else 21\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n #pred_joint.requires_grad = True\n assert pred_joint.shape == model_kwargs['y']['global_joint'].shape == model_kwargs['y']['global_joint_mask'].shape, f\"pred_joint: {pred_joint.shape}, global_joint: {model_kwargs['y']['global_joint'].shape}, global_joint_mask: {model_kwargs['y']['global_joint_mask'].shape}\"\n loss = self.global_joint_condition_loss(pred_joint, model_kwargs['y']['global_joint'], model_kwargs['y']['global_joint_mask'])\n diff_scale = ((pred_joint.clamp(min=1e-4) - model_kwargs['y']['global_joint'].clamp(min=1e-4)).abs() / model_kwargs['y']['global_joint'].clamp(min=1e-4).abs()).mean().item()\n #loss.requires_grad = True\n gradient = th.autograd.grad(loss, x, \n grad_outputs=th.ones_like(loss),\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradient.clone().detach(), loss.item(), diff_scale\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n use_posterior=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n #assert 
use_posterior == False\n p_mean_variance_func = self.p_mean_variance_bfgs_posterior if use_posterior else self.p_mean_variance_bfgs_x0\n out = p_mean_variance_func(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n k_first = self.bfgs_times_first,\n k_last = self.bfgs_times_last,\n )\n \n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n \n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n \n def condition_mean_with_grad(self, cond_fn, x_mean, x_var, t, strength, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n with th.enable_grad():\n x_mean = x_mean.clone().detach().requires_grad_(True)\n gradient, loss_value, diff_scale = cond_fn(x_mean, model_kwargs) # p_mean_var[\"mean\"]\n gradient_guidance = - strength * gradient.float() # x_var.clamp(min = 0.01) \n new_mean = (x_mean + gradient_guidance).clone().detach()\n return new_mean, loss_value, gradient_guidance.clone().detach().abs().cpu(), x_mean.clone().detach().abs().cpu(), diff_scale\n\n\n def condition_mean_bfgs(self, x_mean, num_condition, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n \n with th.enable_grad():\n x_mean = x_mean.clone().detach().contiguous().requires_grad_(True)\n def closure():\n lbfgs.zero_grad()\n objective = self.global_joint_bfgs_optimize(x_mean, model_kwargs)\n objective.backward()\n return objective\n lbfgs = optim.LBFGS([x_mean],\n history_size=10, \n max_iter=4, \n line_search_fn=\"strong_wolfe\")\n for _ in range(num_condition):\n lbfgs.step(closure)\n #loss_value = self.global_joint_bfgs_optimize(x_mean, model_kwargs).item()\n return x_mean #, loss_value\n\n def p_mean_variance_bfgs_x0(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_output = self.condition_mean_bfgs(model_output, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n \n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def p_mean_variance_bfgs_posterior(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] 
tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_mean = self.condition_mean_bfgs(model_mean, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def training_losses(self, model, x_start, 
t, model_kwargs=None, noise=None, dataset=None,\n use_posterior = True,\n k_first = 1,\n k_last = 10,\n t_threshold = 10,):\n \"\"\"\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key \"loss\" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n \"\"\"\n\n # enc = model.model._modules['module']\n model = self._wrap_model(model)\n \n enc = model.model\n mask = model_kwargs['y']['mask']\n get_xyz = lambda sample: enc.rot2xyz(sample, mask=None, pose_rep=enc.pose_rep, translation=enc.translation,\n glob=enc.glob,\n # jointstype='vertices', # 3.4 iter/sec # USED ALSO IN MotionCLIP\n jointstype='smpl', # 3.4 iter/sec\n vertstrans=False)\n\n if model_kwargs is None:\n model_kwargs = {}\n if noise is None:\n noise = th.randn_like(x_start)\n x_t = self.q_sample(x_start, t, noise=noise, model_kwargs=model_kwargs)\n \n #assert k_first == 1, \"k_first must be 1, {}\".format(k_first)\n #assert k_last == 10, \"k_last must be 10, {}\".format(k_last)\n assert use_posterior == True, \"use_posterior must be True, {}\".format(use_posterior)\n if use_posterior:\n '''\n # loss-guided condition in training time\n if t[0] >= t_threshold:\n assert (t >= t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n num_condition = k_first # else k_last\n else:\n num_condition = k_last\n assert (t < t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n '''\n num_condition = k_first\n x_t = self.condition_mean_bfgs(x_t, num_condition, model_kwargs=model_kwargs)\n\n terms = {}\n if self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:\n model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)\n\n target = {\n ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )[0],\n ModelMeanType.START_X: x_start,\n ModelMeanType.EPSILON: noise,\n }[self.model_mean_type]\n\n assert model_output.shape == target.shape == x_start.shape, \"model_output {}, target {}, x_start {}\".format(model_output.shape ,target.shape ,x_start.shape) # [bs, njoints, nfeats, nframes]\n\n terms[\"rot_mse\"] = self.masked_l2(target, model_output, mask) # mean_flat(rot_mse)\n\n terms[\"loss\"] = terms[\"rot_mse\"] + terms.get('vb', 0.) 
+\\\n (self.lambda_vel * terms.get('vel_mse', 0.)) +\\\n (self.lambda_rcxyz * terms.get('rcxyz_mse', 0.)) + \\\n (self.lambda_fc * terms.get('fc', 0.))\n else:\n raise NotImplementedError(self.loss_type)\n\n return terms" }, { "identifier": "SpacedDiffusion", "path": "diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "edit_control_args", "path": "utils/parser_util.py", "snippet": "def edit_control_args():\n parser = ArgumentParser()\n # args specified by the user: (all other will be loaded from the model)\n add_base_options(parser)\n add_sampling_options(parser)\n add_edit_inpainting_options(parser)\n return parse_and_load_from_model(parser)" }, { "identifier": "load_controlmdm_and_diffusion", "path": "utils/model_util.py", "snippet": "def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): \n model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass)\n model_path = args.model_path\n print(f\"Loading checkpoints from [{model_path}]...\")\n state_dict = torch.load(model_path, map_location='cpu')\n load_model_wo_clip(model, state_dict)\n model.mean = data.dataset.t2m_dataset.mean\n model.std = data.dataset.t2m_dataset.std\n\n model.to(device)\n model.eval() # disable random masking\n model = wrap_model(model, args)\n return model, diffusion" }, { "identifier": "dist_util", "path": "utils/dist_util.py", "snippet": "GPUS_PER_NODE = 8\nSETUP_RETRY_COUNT = 3\ndef setup_dist(device=0):\ndef dev():\ndef load_state_dict(path, 
**kwargs):\ndef sync_params(params):\ndef _find_free_port():" }, { "identifier": "wrap_model", "path": "model/cfg_sampler.py", "snippet": "def wrap_model(model, args):\n if args.guidance_param not in [0., 1.]:\n return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler\n elif args.guidance_param == 0:\n return UnconditionedModel(model)\n else:\n return model" }, { "identifier": "get_dataset_loader", "path": "data_loaders/get_data.py", "snippet": "def get_dataset_loader(name, batch_size, num_frames, split='train', load_mode='train', opt=None, short_db=False, cropping_sampler=False, size=None):\n if load_mode == 'text_only':\n load_mode = 'train'\n dataset = get_dataset(name, num_frames, split, load_mode, batch_size, opt, short_db, cropping_sampler, size)\n collate = get_collate_fn(name, load_mode)\n\n n_workers = 1 if load_mode in ['movement_train', 'evaluator_train'] else 8\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True,\n num_workers=n_workers, drop_last=True, collate_fn=collate\n )\n\n return loader" }, { "identifier": "recover_from_ric", "path": "data_loaders/humanml/scripts/motion_process.py", "snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions" }, { "identifier": "get_control_mask", "path": "data_loaders/humanml_utils.py", "snippet": "def get_control_mask(mask_name, shape, **kwargs):\n assert mask_name == \"global_joint\", \"mask_name must be 'global_joint', got {}\".format(mask_name)\n mask = np.zeros(shape)\n mask = np.maximum(mask, get_global_joint_mask(shape, **kwargs))\n return mask" }, { "identifier": "HML_JOINT_NAMES", "path": "data_loaders/humanml_utils.py", "snippet": "HML_JOINT_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',\n 'spine3',\n 'left_foot',\n 'right_foot',\n 'neck',\n 'left_collar',\n 'right_collar',\n 'head',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',\n 'right_wrist',\n]" }, { "identifier": "plot_3d_motion", "path": "data_loaders/humanml/utils/plot_script.py", "snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=(8, 8), fps=120, radius=4,\n vis_mode='default', gt_frames=[], handshake_size=0, blend_size=0, step_sizes=[], lengths = [], joints2=None, painting_features=[], guidance=None):\n matplotlib.use('Agg')\n \"\"\"\n A wrapper around explicit_plot_3d_motion that \n uses gt_frames to determine the colors of the frames\n \"\"\"\n data = joints.copy().reshape(len(joints), -1, 3)\n frames_number = data.shape[0]\n frame_colors = ['blue' if index in gt_frames else 'orange' for index in range(frames_number)]\n if vis_mode == 'unfold':\n frame_colors = ['purple'] *handshake_size + ['blue']*blend_size + ['orange'] *(120-handshake_size*2-blend_size*2) +['orange']*blend_size\n frame_colors = ['orange'] *(120-handshake_size-blend_size) + ['orange']*blend_size + frame_colors*1024\n elif vis_mode == 'unfold_arb_len':\n for 
ii, step_size in enumerate(step_sizes):\n if ii == 0:\n frame_colors = ['orange']*(step_size - handshake_size - blend_size) + ['orange']*blend_size + ['purple'] * (handshake_size//2)\n continue\n if ii == len(step_sizes)-1:\n frame_colors += ['purple'] * (handshake_size//2) + ['orange'] * blend_size + ['orange'] * (lengths[ii] - handshake_size - blend_size)\n continue\n frame_colors += ['purple'] * (handshake_size // 2) + ['orange'] * blend_size + ['orange'] * (\n lengths[ii] - 2 * handshake_size - 2 * blend_size) + ['orange'] * blend_size + \\\n ['purple'] * (handshake_size // 2)\n elif vis_mode == 'gt':\n frame_colors = ['blue'] * frames_number\n explicit_plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=figsize, fps=fps, radius=radius, \n vis_mode=vis_mode, frame_colors=frame_colors, joints2=joints2, painting_features=painting_features, guidance=guidance)" }, { "identifier": "ControlMDM", "path": "model/ControlMDM.py", "snippet": "class ControlMDM(MDM):\n\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, args=None, **kargs):\n\n super(ControlMDM, self).__init__(modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim, ff_size, num_layers, num_heads, dropout,\n ablation, activation, legacy, data_rep, dataset, clip_dim,\n arch, emb_trans_dec, clip_version, **kargs)\n self.args = args\n self.num_layers = num_layers\n self.multi_person = args.multi_person\n self.upper_orientation_index = [0, 16, 17] # root, l_shoulder, r_shoulder\n self.lower_orientation_index = [0, 1, 2] # root, l_hip, r_hip\n\n # linear layers init with zeros\n if self.dataset == 'kit':\n self.first_zero_linear = nn.Linear(21*3*2 + 2*3, self.latent_dim)\n elif self.dataset == 'humanml':\n self.first_zero_linear = nn.Linear(22*3*2 + 2*3, self.latent_dim)\n else:\n raise NotImplementedError('Supporting only kit and humanml dataset, got {}'.format(self.dataset))\n \n nn.init.zeros_(self.first_zero_linear.weight)\n nn.init.zeros_(self.first_zero_linear.bias)\n self.mid_zero_linear = nn.ModuleList(\n [nn.Linear(self.latent_dim, self.latent_dim) for _ in range(self.num_layers)])\n for m in self.mid_zero_linear:\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)\n\n if self.arch == 'trans_enc':\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n del self.seqTransEncoder\n self.seqTransEncoder_mdm = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n self.seqTransEncoder_control = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n else:\n raise ValueError('Supporting only trans_enc arch.')\n\n self.freeze_block(self.input_process)\n self.freeze_block(self.sequence_pos_encoder)\n self.freeze_block(self.seqTransEncoder_mdm)\n self.freeze_block(self.embed_timestep)\n if 'text' in self.cond_mode:\n self.freeze_block(self.embed_text)\n self.freeze_block(self.output_process)\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = torch.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = torch.tensor(self.mean, 
dtype=data.dtype, device=data.device, requires_grad=False)\n output = torch.add(torch.mul(data, std), mean)\n return output\n \n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:,:, 1] - triangles[:, :,0]\n v2 = triangles[:,:, 2] - triangles[:,:,0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = torch.cross(v2, v1, dim=-1)\n\n # Normalize the normal vectors to unit length\n normals = nn.functional.normalize(normals, dim=-1)\n return normals\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n curr_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert curr_joint.shape[1] == 1\n curr_joint = recover_from_ric(curr_joint, n_joints)\n curr_joint = curr_joint.view(-1, *curr_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n if self.multi_person:\n curr_joint[1::2, :,2,:] *= -1\n curr_joint[1::2, :,0,:] *= -1\n curr_joint[1::2, :,2,:] += 2\n\n # more than 3 people\n #curr_joint[1, :,2,:] *= -1\n #curr_joint[1, :,0,:] *= -1\n #curr_joint[1, :,2,:] += 2\n #curr_joint[2, :,0,:] += 1\n return curr_joint\n\n def forward(self, x, timesteps, y=None):\n bs, njoints, nfeats, seqlen = x.shape\n control_bs, n_global_joints, xyz_dim, control_frames = y['global_joint'].shape\n assert bs == control_bs and seqlen == control_frames, \"bs {} != {} or seqlen {} != {}\".format(bs, control_bs, seqlen, control_frames)\n assert xyz_dim ==3, \"xyz_dim {} != 3\".format(xyz_dim)\n # prepare global joints for controlmdm\n curr_joint = self.humanml_to_global_joint(x).clone().detach() # [bs, njoints, 3, seqlen]\n curr_joint.requires_grad = False\n\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n\n # controlmdm\n # orientation\n upper_triangles = curr_joint[:,self.upper_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n lower_triangles = curr_joint[:,self.lower_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n upper_orientation = self.compute_triangle_normals(upper_triangles) # [seqlen, bs, 3]\n lower_orientation = self.compute_triangle_normals(lower_triangles) # [seqlen, bs, 3]\n\n # relative position to joint\n '''\n relative_position = torch.zeros_like(curr_joint, device = xseq.device, dtype=torch.float32) # [bs, njoints, 3, seqlen]\n relative_position[1::2,:,:,:] = ((y['global_joint'][::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,1::2,:,:].unsqueeze(2))*y['global_joint_mask'][::2,:,:,:].bool().float()).float().sum(1)\n relative_position[::2,:,:,:] = ((y['global_joint'][1::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,::2,:,:].unsqueeze(2))*y['global_joint_mask'][1::2,:,:,:].bool().float()).float().sum(1)\n '''\n relative_position = ((y['global_joint'].float() - curr_joint)*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_position = 
relative_position.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n\n # relative position to root\n relative_root = ((y['global_joint'].float() - curr_joint[:,[0],:,:])*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_root = relative_root.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n global_joint_feat = torch.cat((relative_position, relative_root, upper_orientation, lower_orientation), axis=-1) # [seqlen, bs, 22*3 *2 +3 +3]\n \n global_joint_feat = self.first_zero_linear(global_joint_feat) # [seqlen, bs, d]\n control_input = xseq + torch.cat((torch.zeros_like(emb, device = xseq.device, dtype=torch.float32), global_joint_feat), axis=0) # [seqlen+1, bs, d]\n control_output_list = self.seqTransEncoder_control.return_all_layers(control_input) # [seqlen+1, bs, d]\n for i in range(self.num_layers):\n control_output_list[i] = self.mid_zero_linear[i](control_output_list[i])\n \n output = self.seqTransEncoder_mdm.forward_with_condition(xseq, control_output_list)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output\n\n def trainable_parameters(self):\n return [p for name, p in self.named_parameters() if p.requires_grad]\n # return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n \n def trainable_parameter_names(self):\n return [name for name, p in self.named_parameters() if p.requires_grad]\n\n def freeze_block(self, block):\n block.eval()\n for p in block.parameters():\n p.requires_grad = False\n\n def unfreeze_block(self, block):\n block.train()\n for p in block.parameters():\n p.requires_grad = True\n \n def forward_without_control(self, x, timesteps, y=None): #\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder_mdm(xseq)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output" } ]
from diffusion.control_diffusion import ControlGaussianDiffusion
from diffusion.respace import SpacedDiffusion
from utils.fixseed import fixseed
from utils.parser_util import edit_control_args
from utils.model_util import load_controlmdm_and_diffusion
from utils import dist_util
from model.cfg_sampler import wrap_model
from data_loaders.get_data import get_dataset_loader
from data_loaders.humanml.scripts.motion_process import recover_from_ric
from data_loaders.humanml_utils import get_control_mask, HML_JOINT_NAMES
from data_loaders.humanml.utils.plot_script import plot_3d_motion
from model.ControlMDM import ControlMDM
import os
import numpy as np
import torch
import data_loaders.humanml.utils.paramUtil as paramUtil
import shutil
11,120
# This code is based on https://github.com/openai/guided-diffusion """ Generate a large batch of image samples from a model and save them as a large numpy array. This can be used to produce samples for FID evaluation. """ def main(): args = edit_control_args() assert args.multi_person == False, 'multi-person is not supported for this script' fixseed(args.seed) out_path = args.output_dir name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 fps = 12.5 if args.dataset == 'kit' else 20 dist_util.setup_dist(args.device) if out_path == '': out_path = os.path.join(os.path.dirname(args.model_path), 'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed)) if args.text_condition != '': out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') print('Loading dataset...') assert args.num_samples <= args.batch_size, \ f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' # So why do we need this check? In order to protect GPU from a memory overload in the following line. # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. # If it doesn't, and you still want to sample more prompts, run this script with different seeds # (specify through the --seed flag) args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples data = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=max_frames, split='test', load_mode='train', size=args.num_samples) # in train mode, you get both text and motion. # data.fixed_length = n_frames total_num_samples = args.num_samples * args.num_repetitions print("Creating model and diffusion...") DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion model, diffusion = load_controlmdm_and_diffusion(args, data, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) diffusion.mean = data.dataset.t2m_dataset.mean diffusion.std = data.dataset.t2m_dataset.std iterator = iter(data) input_motions, model_kwargs = next(iterator) input_motions = input_motions.to(dist_util.dev()) if args.text_condition != '': texts = [args.text_condition] * args.num_samples model_kwargs['y']['text'] = texts # add inpainting mask according to args control_joint = 'right_wrist' assert max_frames == input_motions.shape[-1] gt_frames_per_sample = {} n_joints = 22 if input_motions.shape[1] == 263 else 21 unnormalized_motion = data.dataset.t2m_dataset.inv_transform_torch(input_motions.permute(0, 2, 3, 1)).float() global_joints = recover_from_ric(unnormalized_motion, n_joints) global_joints = global_joints.view(-1, *global_joints.shape[2:]).permute(0, 2, 3, 1) global_joints.requires_grad = False model_kwargs['y']['global_joint'] = global_joints
# This code is based on https://github.com/openai/guided-diffusion """ Generate a large batch of image samples from a model and save them as a large numpy array. This can be used to produce samples for FID evaluation. """ def main(): args = edit_control_args() assert args.multi_person == False, 'multi-person is not supported for this script' fixseed(args.seed) out_path = args.output_dir name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 fps = 12.5 if args.dataset == 'kit' else 20 dist_util.setup_dist(args.device) if out_path == '': out_path = os.path.join(os.path.dirname(args.model_path), 'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed)) if args.text_condition != '': out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') print('Loading dataset...') assert args.num_samples <= args.batch_size, \ f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' # So why do we need this check? In order to protect GPU from a memory overload in the following line. # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. # If it doesn't, and you still want to sample more prompts, run this script with different seeds # (specify through the --seed flag) args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples data = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=max_frames, split='test', load_mode='train', size=args.num_samples) # in train mode, you get both text and motion. # data.fixed_length = n_frames total_num_samples = args.num_samples * args.num_repetitions print("Creating model and diffusion...") DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion model, diffusion = load_controlmdm_and_diffusion(args, data, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) diffusion.mean = data.dataset.t2m_dataset.mean diffusion.std = data.dataset.t2m_dataset.std iterator = iter(data) input_motions, model_kwargs = next(iterator) input_motions = input_motions.to(dist_util.dev()) if args.text_condition != '': texts = [args.text_condition] * args.num_samples model_kwargs['y']['text'] = texts # add inpainting mask according to args control_joint = 'right_wrist' assert max_frames == input_motions.shape[-1] gt_frames_per_sample = {} n_joints = 22 if input_motions.shape[1] == 263 else 21 unnormalized_motion = data.dataset.t2m_dataset.inv_transform_torch(input_motions.permute(0, 2, 3, 1)).float() global_joints = recover_from_ric(unnormalized_motion, n_joints) global_joints = global_joints.view(-1, *global_joints.shape[2:]).permute(0, 2, 3, 1) global_joints.requires_grad = False model_kwargs['y']['global_joint'] = global_joints
model_kwargs['y']['global_joint_mask'] = torch.tensor(get_control_mask(args.inpainting_mask, global_joints.shape, joint = control_joint, ratio=args.mask_ratio, dataset = args.dataset)).float().to(dist_util.dev())
9
2023-11-27 05:28:02+00:00
16k
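The compute_triangle_normals helper in the ControlMDM context of the row above is small enough to sanity-check in isolation: it takes two edge vectors of each triangle, crosses them, and unit-normalizes the result. Below is a minimal standalone sketch of that same computation; the function body follows the snippet above, while the test triangle and the printed expected value are illustrative and not part of the dataset row.

import torch
import torch.nn as nn

def compute_triangle_normals(triangles):
    # triangles: [..., 3 points, 3 coords], indexed the same way as the ControlMDM method above
    v1 = triangles[:, :, 1] - triangles[:, :, 0]
    v2 = triangles[:, :, 2] - triangles[:, :, 0]
    normals = torch.cross(v2, v1, dim=-1)           # cross(v2, v1), matching the original order
    return nn.functional.normalize(normals, dim=-1)

# One triangle lying in the xy-plane: the normal comes out along -z because the
# original code crosses v2 with v1 rather than v1 with v2.
tri = torch.tensor([[[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]]])  # shape [1, 1, 3, 3]
print(compute_triangle_normals(tri))  # tensor([[[ 0.,  0., -1.]]])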
moonbow721/DPoser
run/train.py
[ { "identifier": "save_obj", "path": "lib/body_model/visual.py", "snippet": "def save_obj(v, f, file_name='output.obj'):\n obj_file = open(file_name, 'w')\n for i in range(len(v)):\n obj_file.write('v ' + str(v[i][0]) + ' ' + str(v[i][1]) + ' ' + str(v[i][2]) + '\\n')\n for i in range(len(f)):\n obj_file.write('f ' + str(f[i][0] + 1) + '/' + str(f[i][0] + 1) + ' ' + str(f[i][1] + 1) + '/' + str(\n f[i][1] + 1) + ' ' + str(f[i][2] + 1) + '/' + str(f[i][2] + 1) + '\\n')\n obj_file.close()" }, { "identifier": "render_mesh", "path": "lib/body_model/visual.py", "snippet": "def render_mesh(img, mesh, face, cam_param, view='random'):\n # mesh\n mesh = trimesh.Trimesh(mesh, face)\n\n centroid = np.mean(mesh.vertices, axis=0)\n translation_to_origin = trimesh.transformations.translation_matrix(-centroid)\n mesh.apply_transform(translation_to_origin)\n\n if view == 'random':\n options_side = ['half', '']\n options_direction = ['left', 'right', 'front', 'back']\n options_height = ['above', 'bottom', '']\n\n chosen_side = random.choice(options_side)\n chosen_direction = random.choice(options_direction)\n chosen_height = random.choice(options_height)\n\n view = '_'.join([opt for opt in [chosen_side, chosen_direction, chosen_height] if opt])\n\n if 'half' in view:\n side_angle = 45\n else:\n side_angle = 90\n\n if 'left' in view:\n angle = np.radians(-side_angle)\n elif 'right' in view:\n angle = np.radians(side_angle)\n elif 'back' in view:\n angle = np.radians(180)\n else: # front\n angle = np.radians(0)\n axis = [0, 1, 0]\n rotation = trimesh.transformations.rotation_matrix(angle, axis)\n mesh.apply_transform(rotation)\n\n if 'above' in view:\n angle = np.radians(30)\n elif 'bottom' in view:\n angle = np.radians(-30)\n else: # nothing\n angle = np.radians(0)\n axis = [1, 0, 0]\n rotation = trimesh.transformations.rotation_matrix(angle, axis)\n mesh.apply_transform(rotation)\n\n translation_to_centroid = trimesh.transformations.translation_matrix(centroid)\n mesh.apply_transform(translation_to_centroid)\n\n mesh.vertices[:, 2] -= 7\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE',\n # baseColorFactor=(1.0, 1.0, 0.9, 1.0),\n baseColorFactor=(0.93, 0.6, 0.4, 1.0),\n )\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False)\n scene = pyrender.Scene(ambient_light=(0.3, 0.3, 0.3))\n scene.add(mesh, 'mesh')\n\n focal, princpt = cam_param['focal'], cam_param['princpt']\n camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1])\n scene.add(camera)\n\n # renderer\n renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0)\n\n # light\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=0.8)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n\n # render\n rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n rgb = rgb[:, :, :3].astype(np.float32)\n valid_mask = (depth > 0)[:, :, None]\n\n # save to image\n render_img = rgb * valid_mask + img * (1 - valid_mask)\n return render_img" }, { "identifier": "average_pairwise_distance", "path": "lib/utils/metric.py", "snippet": "def average_pairwise_distance(joints3d):\r\n \"\"\"\r\n Calculate Average Pairwise Distance (APD) for a batch of poses.\r\n\r\n Parameters:\r\n - joints3d 
(torch.Tensor): A tensor of shape (batch_size, num_joints, 3)\r\n\r\n Returns:\r\n - APD (torch.Tensor): Average Pairwise Distance\r\n \"\"\"\r\n batch_size, num_joints, _ = joints3d.shape\r\n\r\n # Initialize tensor to store pairwise distances between samples in the batch\r\n pairwise_distances = torch.zeros(batch_size, batch_size)\r\n\r\n for i in range(batch_size):\r\n for j in range(i + 1, batch_size):\r\n # Calculate the pairwise distance between sample i and sample j\r\n dist = torch.mean(torch.norm(joints3d[i, :, :] - joints3d[j, :, :], dim=1))\r\n\r\n pairwise_distances[i, j] = dist\r\n pairwise_distances[j, i] = dist # Distance is symmetric\r\n\r\n # The diagonal is zero as the distance between a sample and itself is zero\r\n pairwise_distances.fill_diagonal_(0)\r\n\r\n # Calculate the mean over all the pairwise distances in the batch to get APD\r\n APD = torch.sum(pairwise_distances) / (batch_size * (batch_size - 1))\r\n\r\n return APD\r" }, { "identifier": "create_mask", "path": "lib/utils/misc.py", "snippet": "def create_mask(body_poses, part='legs', observation_type='noise'):\r\n assert len(body_poses.shape) == 2 and body_poses.shape[1] % N_POSES == 0\r\n rot_N = body_poses.shape[1] // N_POSES\r\n assert rot_N in [3, 6]\r\n # for axis-angle or rot6d\r\n mask_joints = getattr(BodyPartIndices, part)\r\n mask = body_poses.new_ones(body_poses.shape)\r\n mask_indices = torch.tensor(mask_joints).view(-1, 1) * rot_N + torch.arange(rot_N).view(1, -1)\r\n mask_indices = mask_indices.flatten()\r\n mask[:, mask_indices] = 0\r\n\r\n # masked data as Gaussian noise\r\n observation = body_poses.clone()\r\n if observation_type == 'noise':\r\n observation[:, mask_indices] = torch.randn_like(observation[:, mask_indices])\r\n # load the mean pose as observation\r\n else:\r\n batch_size = body_poses.shape[0]\r\n smpl_mean_params = np.load(constants.SMPL_MEAN_PATH)\r\n rot6d_body_poses = torch.tensor(smpl_mean_params['pose'][6:,], dtype=torch.float32, device=body_poses.device) # [138]\r\n axis_body_pose = rot6d_to_axis_angle(rot6d_body_poses.reshape(-1, 6)).reshape(-1) # [69]\r\n if rot_N == 3:\r\n observation[:, mask_indices] = axis_body_pose[None, mask_indices].repeat(batch_size, 1)\r\n elif rot_N == 6:\r\n observation[:, mask_indices] = rot6d_body_poses[None, mask_indices].repeat(batch_size, 1)\r\n else:\r\n raise NotImplementedError\r\n\r\n return mask, observation\r" }, { "identifier": "create_logger", "path": "lib/utils/generic.py", "snippet": "def create_logger(cfg, phase='train', no_logger=False, folder_name=''):\n root_output_dir = Path(cfg.OUTPUT_DIR)\n # set up logger\n if not root_output_dir.exists():\n print('=> creating {}'.format(root_output_dir))\n root_output_dir.mkdir()\n\n dataset = cfg.DATASET.TRAIN_DATASET + '_' + cfg.DATASET.TEST_DATASET\n dataset = dataset.replace(':', '_')\n\n # cfg_name = os.path.basename(cfg_name).split('.')[0]\n\n time_str = time.strftime('%Y-%m-%d-%H-%M-%S')\n\n if folder_name:\n final_output_dir = root_output_dir / dataset / f'{time_str}-{folder_name}'\n else:\n final_output_dir = root_output_dir / dataset / time_str\n\n # only get final output dir for distributed usage\n if no_logger:\n return None, str(final_output_dir), None\n\n print('=> creating {}'.format(final_output_dir))\n final_output_dir.mkdir(parents=True, exist_ok=True)\n\n log_file = '{}_{}.log'.format(time_str, phase)\n final_log_file = final_output_dir / log_file\n head = '%(asctime)-15s %(message)s'\n logging.basicConfig(filename=str(final_log_file),\n format=head,\n force=True) # >= 
python 3.8\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n logging.getLogger('').addHandler(console)\n\n # tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / time_str\n # print('=> creating {}'.format(tensorboard_log_dir))\n # tensorboard_log_dir.mkdir(parents=True, exist_ok=True)\n\n return logger, str(final_output_dir), str(final_output_dir)" }, { "identifier": "ScoreModelFC", "path": "lib/algorithms/advanced/model.py", "snippet": "class ScoreModelFC(nn.Module):\n \"\"\"\n Independent condition feature projection layers for each block\n \"\"\"\n\n def __init__(self, config, n_poses=21, pose_dim=6, hidden_dim=64,\n embed_dim=32, n_blocks=2):\n super(ScoreModelFC, self).__init__()\n\n self.config = config\n self.n_poses = n_poses\n self.joint_dim = pose_dim\n self.n_blocks = n_blocks\n\n self.act = get_act(config)\n\n self.pre_dense = nn.Linear(n_poses * pose_dim, hidden_dim)\n self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)\n self.pre_dense_cond = nn.Linear(hidden_dim, hidden_dim)\n self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)\n self.dropout = nn.Dropout(p=config.model.dropout)\n\n # time embedding\n self.time_embedding_type = config.model.embedding_type.lower()\n if self.time_embedding_type == 'fourier':\n self.gauss_proj = GaussianFourierProjection(embed_dim=embed_dim, scale=config.model.fourier_scale)\n elif self.time_embedding_type == 'positional':\n self.posit_proj = functools.partial(get_timestep_embedding, embedding_dim=embed_dim)\n else:\n assert 0\n\n self.shared_time_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n self.act,\n )\n self.register_buffer('sigmas', torch.tensor(get_sigmas(config), dtype=torch.float))\n\n for idx in range(n_blocks):\n setattr(self, f'b{idx + 1}_dense1', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense1_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))\n\n setattr(self, f'b{idx + 1}_dense2', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense2_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))\n\n self.post_dense = nn.Linear(hidden_dim, n_poses * pose_dim)\n\n def forward(self, batch, t, condition=None, mask=None):\n \"\"\"\n batch: [B, j*3] or [B, j*6]\n t: [B]\n Return: [B, j*3] or [B, j*6] same dim as batch\n \"\"\"\n bs = batch.shape[0]\n\n # batch = batch.view(bs, -1) # [B, j*3]\n\n # time embedding\n if self.time_embedding_type == 'fourier':\n # Gaussian Fourier features embeddings.\n used_sigmas = t\n temb = self.gauss_proj(torch.log(used_sigmas))\n elif self.time_embedding_type == 'positional':\n # Sinusoidal positional embeddings.\n timesteps = t\n used_sigmas = self.sigmas[t.long()]\n temb = self.posit_proj(timesteps)\n else:\n raise ValueError(f'time embedding type {self.time_embedding_type} unknown.')\n\n temb = self.shared_time_embed(temb)\n\n h = self.pre_dense(batch)\n h += self.pre_dense_t(temb)\n h = self.pre_gnorm(h)\n h = self.act(h)\n h = self.dropout(h)\n\n for idx in range(self.n_blocks):\n h1 = getattr(self, f'b{idx + 1}_dense1')(h)\n h1 += getattr(self, f'b{idx + 1}_dense1_t')(temb)\n h1 = getattr(self, f'b{idx + 1}_gnorm1')(h1)\n h1 = self.act(h1)\n # dropout, maybe\n h1 = self.dropout(h1)\n\n h2 = getattr(self, f'b{idx + 1}_dense2')(h1)\n h2 += getattr(self, f'b{idx + 1}_dense2_t')(temb)\n h2 = getattr(self, f'b{idx + 1}_gnorm2')(h2)\n h2 = self.act(h2)\n # dropout, 
maybe\n h2 = self.dropout(h2)\n\n h = h + h2\n\n res = self.post_dense(h) # [B, j*3]\n\n ''' normalize the output '''\n if self.config.model.scale_by_sigma:\n used_sigmas = used_sigmas.reshape((bs, 1))\n res = res / used_sigmas\n\n return res" }, { "identifier": "TimeMLPs", "path": "lib/algorithms/advanced/model.py", "snippet": "class TimeMLPs(torch.nn.Module):\n def __init__(self, config, n_poses=21, pose_dim=6, hidden_dim=64, n_blocks=2):\n super().__init__()\n dim = n_poses * pose_dim\n self.act = get_act(config)\n\n layers = [torch.nn.Linear(dim + 1, hidden_dim),\n self.act]\n\n for _ in range(n_blocks):\n layers.extend([\n torch.nn.Linear(hidden_dim, hidden_dim),\n self.act,\n torch.nn.Dropout(p=config.model.dropout)\n ])\n\n layers.append(torch.nn.Linear(hidden_dim, dim))\n\n self.net = torch.nn.Sequential(*layers)\n\n def forward(self, x, t, condition=None, mask=None):\n return self.net(torch.cat([x, t[:, None]], dim=1))" }, { "identifier": "losses", "path": "lib/algorithms/advanced/losses.py", "snippet": "def get_optimizer(config, params):\ndef optimization_manager(config):\n def optimize_fn(optimizer, params, step, lr=config.optim.lr,\n warmup=config.optim.warmup,\n grad_clip=config.optim.grad_clip):\ndef get_sde_loss_fn(sde, train, reduce_mean=False, continuous=True, likelihood_weighting=False, eps=1e-5,\n return_data=False, denoise_steps=5):\n def loss_fn(model, batch, condition, mask):\n def multi_step_denoise(x_t, t, t_end, N=10):\ndef get_smld_loss_fn(vesde, train, reduce_mean=False):\n def loss_fn(model, batch, condition, mask):\ndef get_ddpm_loss_fn(vpsde, train, reduce_mean=True):\n def loss_fn(model, batch, condition, mask):\ndef get_step_fn(sde, train, optimize_fn=None, reduce_mean=False, continuous=True,\n likelihood_weighting=False, auxiliary_loss=False,\n denormalize=None, body_model=None, rot_rep='rot6d', denoise_steps=5):\n def step_fn(state, batch, condition=None, mask=None):\n SNR = alpha / sigma[:, None]" }, { "identifier": "sde_lib", "path": "lib/algorithms/advanced/sde_lib.py", "snippet": "class SDE(abc.ABC):\n class RSDE(self.__class__):\nclass VPSDE(SDE):\nclass subVPSDE(SDE):\nclass VESDE(SDE):\n def __init__(self, N):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def reverse(self, score_fn, probability_flow=False):\n def __init__(self):\n def T(self):\n def sde(self, x, t, condition=None, mask=None, guide=False):\n def discretize(self, x, t, condition=None, mask=None):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def return_alpha_sigma(self, t):\n def __init__(self, sigma_min=0.01, sigma_max=50, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))\n N = self.N\n T = self.T\n N = np.prod(shape[1:])\n G = sqrt_beta\n N = np.prod(shape[1:])\n N = np.prod(shape[1:])\n G = 
torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)" }, { "identifier": "sampling", "path": "lib/algorithms/advanced/sampling.py", "snippet": "_CORRECTORS = {}\n_PREDICTORS = {}\ndef register_predictor(cls=None, *, name=None):\n def _register(cls):\ndef register_corrector(cls=None, *, name=None):\n def _register(cls):\ndef get_predictor(name):\ndef get_corrector(name):\ndef get_sampling_fn(config, sde, shape, inverse_scaler, eps, device=None):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def update_fn_guide(self, x_t, t, observation, mask, condition=None, grad_step=1.0):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def vesde_update_fn(self, x, t):\n def vpsde_update_fn(self, x, t):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\ndef shared_predictor_update_fn(x, t, observation, mask, sde, model, predictor, probability_flow, continuous):\ndef shared_corrector_update_fn(x, t, observation, mask, sde, model, corrector, continuous, snr, n_steps):\ndef get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,\n n_steps=1, probability_flow=False, continuous=False,\n denoise=True, eps=1e-3, device='cuda'):\n def get_imputation_update_fn(update_fn):\n def imputation_update_fn(x, vec_t, observation, mask, model, args):\n def pc_sampler(model, observation=None, mask=None, z=None, start_step=0, args=None):\ndef get_ode_sampler(sde, shape, inverse_scaler,\n denoise=False, rtol=1e-5, atol=1e-5,\n method='RK45', eps=1e-3, device='cuda'):\n def denoise_update_fn(model, x):\n def drift_fn(model, x, t):\n def ode_sampler(model, z=None):\n def ode_func(t, x):\nclass Predictor(abc.ABC):\nclass Corrector(abc.ABC):\nclass EulerMaruyamaPredictor(Predictor):\nclass ReverseDiffusionPredictor(Predictor):\nclass AncestralSamplingPredictor(Predictor):\nclass NonePredictor(Predictor):\nclass LangevinCorrector(Corrector):\nclass AnnealedLangevinDynamics(Corrector):\nclass NoneCorrector(Corrector):" }, { "identifier": "likelihood", "path": "lib/algorithms/advanced/likelihood.py", "snippet": "def get_div_fn(fn):\r\n def div_fn(x, t, eps):\r\ndef get_likelihood_fn(sde, inverse_scaler, hutchinson_type='Rademacher',\r\n rtol=1e-5, atol=1e-5, method='RK45', eps=1e-5):\r\n def drift_fn(model, x, t):\r\n def div_fn(model, x, t, noise):\r\n def likelihood_fn(model, data):\r\n def ode_func(t, x):\r\n N = np.prod(shape[1:])\r" }, { "identifier": "ExponentialMovingAverage", "path": "lib/algorithms/ema.py", "snippet": "class ExponentialMovingAverage:\n \"\"\"\n Maintains (exponential) moving average of a set of parameters.\n \"\"\"\n\n def __init__(self, parameters, decay=0.999, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to 
use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently maintained parameters.\n\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param))\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = state_dict['shadow_params']" }, { "identifier": "AMASSDataset", "path": "lib/dataset/AMASS.py", "snippet": "class AMASSDataset(torch.utils.data.Dataset):\r\n def __init__(self, root_path, version='version0', subset='train',\r\n sample_interval=None, rot_rep='rot6d', return_shape=False,\r\n normalize=True, min_max=True):\r\n\r\n self.root_path = root_path\r\n self.version = version\r\n assert subset in ['train', 'valid', 'test']\r\n self.subset = subset\r\n self.sample_interval = sample_interval\r\n assert rot_rep in ['axis', 'rot6d']\r\n self.rot_rep = rot_rep\r\n self.return_shape = return_shape\r\n self.normalize = normalize\r\n self.min_max = min_max\r\n\r\n self.poses, self.shapes = self.read_data()\r\n\r\n if self.sample_interval:\r\n self._sample(sample_interval)\r\n if self.normalize:\r\n if self.min_max:\r\n self.min_poses, self.max_poses, self.min_shapes, self.max_shapes = self.Normalize()\r\n else:\r\n self.mean_poses, self.std_poses, self.mean_shapes, self.std_shapes = self.Normalize()\r\n\r\n self.real_data_len = len(self.poses)\r\n\r\n def __getitem__(self, idx):\r\n \"\"\"\r\n Return:\r\n [21, 3] or [21, 6] for poses including body and root orient\r\n [10] for shapes (betas) [Optimal]\r\n \"\"\"\r\n data_poses = self.poses[idx % self.real_data_len]\r\n data_dict = {'poses': data_poses}\r\n if self.return_shape:\r\n data_dict['shapes'] = self.shapes[idx % self.real_data_len]\r\n return data_dict\r\n\r\n def __len__(self, ):\r\n return len(self.poses)\r\n\r\n def _sample(self, sample_interval):\r\n print(f'Class AMASSDataset({self.subset}): sample dataset every {sample_interval} frame')\r\n self.poses = self.poses[::sample_interval]\r\n\r\n def read_data(self):\r\n data_path = os.path.join(self.root_path, self.version, self.subset)\r\n # root_orient = torch.load(os.path.join(data_path, 'root_orient.pt'))\r\n poses = torch.load(os.path.join(data_path, 'pose_body.pt'))\r\n shapes = torch.load(os.path.join(data_path, 'betas.pt')) if self.return_shape else None\r\n # poses = torch.cat([root_orient, pose_body], dim=1)\r\n data_len = len(poses)\r\n if self.rot_rep == 'rot6d':\r\n poses = axis_angle_to_rot6d(poses.reshape(-1, 3)).reshape(data_len, -1)\r\n\r\n return poses, shapes\r\n\r\n def Normalize(self):\r\n # Use train dataset for normalize computing, Z_score or min-max Normalize\r\n if self.min_max:\r\n normalize_path = os.path.join(self.root_path, self.version, 'train', self.rot_rep + '_normalize1.pt')\r\n else:\r\n normalize_path = os.path.join(self.root_path, self.version, 'train', self.rot_rep + '_normalize2.pt')\r\n\r\n if os.path.exists(normalize_path):\r\n normalize_params = torch.load(normalize_path)\r\n if self.min_max:\r\n min_poses, max_poses, min_shapes, max_shapes = (\r\n normalize_params['min_poses'],\r\n normalize_params['max_poses'],\r\n normalize_params['min_shapes'],\r\n normalize_params['max_shapes']\r\n )\r\n else:\r\n mean_poses, std_poses, mean_shapes, std_shapes = (\r\n 
normalize_params['mean_poses'],\r\n normalize_params['std_poses'],\r\n normalize_params['mean_shapes'],\r\n normalize_params['std_shapes']\r\n )\r\n else:\r\n if self.min_max:\r\n min_poses = torch.min(self.poses, dim=0)[0]\r\n max_poses = torch.max(self.poses, dim=0)[0]\r\n\r\n min_shapes = torch.min(self.shapes, dim=0)[0] if self.return_shape else None\r\n max_shapes = torch.max(self.shapes, dim=0)[0] if self.return_shape else None\r\n\r\n torch.save({\r\n 'min_poses': min_poses,\r\n 'max_poses': max_poses,\r\n 'min_shapes': min_shapes,\r\n 'max_shapes': max_shapes\r\n }, normalize_path)\r\n else:\r\n mean_poses = torch.mean(self.poses, dim=0)\r\n std_poses = torch.std(self.poses, dim=0)\r\n\r\n mean_shapes = torch.mean(self.shapes, dim=0) if self.return_shape else None\r\n std_shapes = torch.std(self.shapes, dim=0) if self.return_shape else None\r\n\r\n torch.save({\r\n 'mean_poses': mean_poses,\r\n 'std_poses': std_poses,\r\n 'mean_shapes': mean_shapes,\r\n 'std_shapes': std_shapes\r\n }, normalize_path)\r\n\r\n if self.min_max:\r\n self.poses = 2 * (self.poses - min_poses) / (max_poses - min_poses) - 1\r\n if self.return_shape:\r\n self.shapes = 2 * (self.shapes - min_shapes) / (max_shapes - min_shapes) - 1\r\n return min_poses, max_poses, min_shapes, max_shapes\r\n\r\n else:\r\n self.poses = (self.poses - mean_poses) / std_poses\r\n if self.return_shape:\r\n self.shapes = (self.shapes - mean_shapes) / std_shapes\r\n return mean_poses, std_poses, mean_shapes, std_shapes\r\n\r\n\r\n def Denormalize(self, poses, shapes=None):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1).to(poses.device)\r\n max_poses = self.max_poses.view(1, -1).to(poses.device)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n normalized_poses = 0.5 * ((poses + 1) * (max_poses - min_poses) + 2 * min_poses)\r\n\r\n if shapes is not None and self.min_shapes is not None:\r\n min_shapes = self.min_shapes.view(1, -1).to(shapes.device)\r\n max_shapes = self.max_shapes.view(1, -1).to(shapes.device)\r\n\r\n if len(shapes.shape) == 3:\r\n min_shapes = min_shapes.unsqueeze(0)\r\n max_shapes = max_shapes.unsqueeze(0)\r\n\r\n normalized_shapes = 0.5 * ((shapes + 1) * (max_shapes - min_shapes) + 2 * min_shapes)\r\n return normalized_poses, normalized_shapes\r\n else:\r\n return normalized_poses\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1).to(poses.device)\r\n std_poses = self.std_poses.view(1, -1).to(poses.device)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n normalized_poses = poses * std_poses + mean_poses\r\n\r\n if shapes is not None and self.mean_shapes is not None:\r\n mean_shapes = self.mean_shapes.view(1, -1)\r\n std_shapes = self.std_shapes.view(1, -1)\r\n\r\n if len(shapes.shape) == 3:\r\n mean_shapes = mean_shapes.unsqueeze(0)\r\n std_shapes = std_shapes.unsqueeze(0)\r\n\r\n normalized_shapes = shapes * std_shapes + mean_shapes\r\n return normalized_poses, normalized_shapes\r\n else:\r\n return normalized_poses\r\n\r\n def eval(self, preds):\r\n pass\r" }, { "identifier": "N_POSES", "path": "lib/dataset/AMASS.py", "snippet": "N_POSES = 21\r" }, { "identifier": "rot6d_to_axis_angle", "path": "lib/utils/transforms.py", "snippet": "def rot6d_to_axis_angle(rot6d):\n \"\"\"Convert 6d rotation representation to 3d vector 
of axis-angle rotation.\n\n Args:\n angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.\n\n Returns:\n Tensor: tensor of 3d vector of axis-angle rotation.\n\n Shape:\n - Input: :math:`(N, 6)`\n - Output: :math:`(N, 3)`\n \"\"\"\n batch_size = rot6d.shape[0]\n\n rot6d = rot6d.view(batch_size, 3, 2)\n a1 = rot6d[:, :, 0]\n a2 = rot6d[:, :, 1]\n b1 = F.normalize(a1)\n b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)\n b3 = torch.cross(b1, b2)\n rot_mat = torch.stack((b1, b2, b3), dim=-1) # 3x3 rotation matrix\n\n rot_mat = torch.cat([rot_mat, torch.zeros((batch_size, 3, 1), device=rot_mat.device).float()],\n 2) # 3x4 rotation matrix\n axis_angle = tgm.rotation_matrix_to_angle_axis(rot_mat).reshape(-1, 3) # axis-angle\n axis_angle[torch.isnan(axis_angle)] = 0.0\n return axis_angle" }, { "identifier": "BodyModel", "path": "lib/body_model/body_model.py", "snippet": "class BodyModel(nn.Module):\r\n '''\r\n Wrapper around SMPLX body model class.\r\n from https://github.com/davrempe/humor/blob/main/humor/body_model/body_model.py\r\n '''\r\n\r\n def __init__(self,\r\n bm_path,\r\n num_betas=10,\r\n batch_size=1,\r\n num_expressions=10,\r\n model_type='smplx'):\r\n super(BodyModel, self).__init__()\r\n '''\r\n Creates the body model object at the given path.\r\n\r\n :param bm_path: path to the body model pkl file\r\n :param num_expressions: only for smplx\r\n :param model_type: one of [smpl, smplh, smplx]\r\n :param use_vtx_selector: if true, returns additional vertices as joints that correspond to OpenPose joints\r\n '''\r\n\r\n kwargs = {\r\n 'model_type': model_type,\r\n 'num_betas': num_betas,\r\n 'batch_size': batch_size,\r\n 'num_expression_coeffs': num_expressions,\r\n 'use_pca': False,\r\n 'flat_hand_mean': True\r\n }\r\n\r\n assert (model_type in ['smpl', 'smplh', 'smplx'])\r\n if model_type == 'smpl':\r\n self.bm = SMPL(bm_path, **kwargs)\r\n self.num_joints = SMPL.NUM_JOINTS\r\n elif model_type == 'smplh':\r\n # smplx does not support .npz by default, so have to load in manually\r\n smpl_dict = np.load(bm_path, encoding='latin1')\r\n data_struct = Struct(**smpl_dict)\r\n # print(smpl_dict.files)\r\n if model_type == 'smplh':\r\n data_struct.hands_componentsl = np.zeros((0))\r\n data_struct.hands_componentsr = np.zeros((0))\r\n data_struct.hands_meanl = np.zeros((15 * 3))\r\n data_struct.hands_meanr = np.zeros((15 * 3))\r\n V, D, B = data_struct.shapedirs.shape\r\n data_struct.shapedirs = np.concatenate(\r\n [data_struct.shapedirs, np.zeros((V, D, SMPL.SHAPE_SPACE_DIM - B))],\r\n axis=-1) # super hacky way to let smplh use 16-size beta\r\n kwargs['data_struct'] = data_struct\r\n self.bm = SMPLH(bm_path, **kwargs)\r\n self.num_joints = SMPLH.NUM_JOINTS\r\n elif model_type == 'smplx':\r\n self.bm = SMPLX(bm_path, **kwargs)\r\n self.num_joints = SMPLX.NUM_JOINTS\r\n\r\n self.model_type = model_type\r\n self.J_regressor = self.bm.J_regressor.numpy()\r\n self.J_regressor_idx = {'pelvis': 0, 'lwrist': 20, 'rwrist': 21, 'neck': 12}\r\n\r\n def forward(self, root_orient=None, pose_body=None, pose_hand=None, pose_jaw=None, pose_eye=None, betas=None,\r\n trans=None, dmpls=None, expression=None, return_dict=False, **kwargs):\r\n '''\r\n Note dmpls are not supported.\r\n '''\r\n assert (dmpls is None)\r\n # parameters of SMPL should not be updated\r\n out_obj = self.bm(\r\n betas=betas,\r\n global_orient=root_orient,\r\n body_pose=pose_body,\r\n left_hand_pose=None if pose_hand is None else pose_hand[:, :(SMPLH.NUM_HAND_JOINTS * 3)],\r\n right_hand_pose=None if 
pose_hand is None else pose_hand[:, (SMPLH.NUM_HAND_JOINTS * 3):],\r\n transl=trans,\r\n expression=expression,\r\n jaw_pose=pose_jaw,\r\n leye_pose=None if pose_eye is None else pose_eye[:, :3],\r\n reye_pose=None if pose_eye is None else pose_eye[:, 3:],\r\n return_full_pose=True,\r\n **kwargs\r\n )\r\n\r\n out = {\r\n 'v': out_obj.vertices,\r\n 'f': self.bm.faces_tensor,\r\n 'betas': out_obj.betas,\r\n 'Jtr': out_obj.joints,\r\n 'body_joints': out_obj.joints[:22], # only body joints\r\n 'pose_body': out_obj.body_pose,\r\n 'full_pose': out_obj.full_pose\r\n }\r\n if self.model_type in ['smplh', 'smplx']:\r\n out['pose_hand'] = torch.cat([out_obj.left_hand_pose, out_obj.right_hand_pose], dim=-1)\r\n if self.model_type == 'smplx':\r\n out['pose_jaw'] = out_obj.jaw_pose\r\n out['pose_eye'] = pose_eye\r\n\r\n # if not self.use_vtx_selector:\r\n # # don't need extra joints\r\n # out['Jtr'] = out['Jtr'][:, :self.num_joints + 1] # add one for the root\r\n\r\n if not return_dict:\r\n out = Struct(**out)\r\n\r\n return out\r" } ]
import os
import pprint
import traceback
import cv2
import numpy as np
import torch
from pathlib import Path
from absl import app
from absl import flags
from absl.flags import argparse_flags
from ml_collections.config_flags import config_flags
from torch.utils.data import DataLoader
from lib.body_model.visual import save_obj, render_mesh
from lib.utils.metric import average_pairwise_distance
from lib.utils.misc import create_mask
from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter
from lib.utils.generic import create_logger
from lib.algorithms.advanced.model import ScoreModelFC, TimeMLPs
from lib.algorithms.advanced import losses, sde_lib, sampling, likelihood
from lib.algorithms.ema import ExponentialMovingAverage
from lib.dataset.AMASS import AMASSDataset, N_POSES
from lib.utils.transforms import rot6d_to_axis_angle
from lib.body_model.body_model import BodyModel
from lib.dataset.AMASS import Evaler
11,146
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") bg_img = np.ones([512, 384, 3]) * 255 # background canvas focal = [1500, 1500] princpt = [200, 192] def parse_args(argv): parser = argparse_flags.ArgumentParser(description='train diffusion model') parser.add_argument('--dataset-folder', type=str, default='./data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='path of SMPLX model [for visual validation]') parser.add_argument('--restore-dir', type=str, help='resume training') parser.add_argument('--shape', type=bool, default=False, help='handle human shapes (have not been tested)') parser.add_argument('--sample', type=int, help='sample trainset to reduce data') parser.add_argument('--task', type=str, default=None, help='for validating') parser.add_argument('--name', type=str, default='', help='name of checkpoint folder') args = parser.parse_args(argv[1:]) return args def get_dataloader(root_path='', subset='train', version='', sample_interval=None, rot_rep='rot6d', return_shape=False, normalize=True, min_max=True): dataset = AMASSDataset(root_path=root_path, version=version, subset=subset, sample_interval=sample_interval, rot_rep=rot_rep, return_shape=return_shape, normalize=normalize, min_max=min_max) print('AMASS version: {}, rot_rep: {}, normalize: {}'.format(version, rot_rep, normalize)) # drop the last batch to ensure that body model can work all the time if subset == 'train': dataloader = DataLoader(dataset, batch_size=FLAGS.config.training.batch_size, shuffle=True, num_workers=4, pin_memory=False, drop_last=True) else: dataloader = DataLoader(dataset, batch_size=FLAGS.config.eval.batch_size, shuffle=False, num_workers=4, pin_memory=False, drop_last=True) return dataloader, dataset def main(args): def log_metrics(metrics, step, config, logger): log_freq = config.training.log_freq msg = f'Iter: [{step}/{num_train_steps}, {step / num_train_steps * 100:.2f}%][{idx}/{len(train_loader)}],\t' for key, value in metrics.items(): metrics[key] /= log_freq msg += f"{key}: {metrics[key]:.6f},\t" logger.info(msg) metrics = {key: 0.0 for key in metrics} return metrics def log_eval_metrics(metrics, step, writer): for key, value in metrics.items(): avg_value = np.mean(value).item() writer.add_scalar(f'eval_{key}', avg_value, step) metrics[key] = [] # Reset for the next evaluation # args = parse_args() config = FLAGS.config logger, final_output_dir, tb_log_dir = create_logger( config, 'train', folder_name=args.name) if config.training.render: obj_dir = Path(final_output_dir) / 'obj_results' render_dir = Path(final_output_dir) / 'render_results' if not obj_dir.exists(): print('=> creating {}'.format(obj_dir)) obj_dir.mkdir() if not render_dir.exists(): print('=> creating {}'.format(render_dir)) render_dir.mkdir() logger.info(pprint.pformat(config)) logger.info(pprint.pformat(args)) writer = SummaryWriter(tb_log_dir) ''' setup body model for val''' body_model_vis = BodyModel(bm_path=args.bodymodel_path, num_betas=10, batch_size=50, model_type='smplx').to(device) ''' setup datasets, dataloaders''' if args.sample: logger.info(f'sample trainset every {args.sample} frame') train_loader, train_dataset = get_dataloader(args.dataset_folder, 'train', args.version, args.sample, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) test_loader, 
test_dataset = get_dataloader(args.dataset_folder, 'test', args.version, 100, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) # always sample testset to save time denormalize_data = train_dataset.Denormalize if config.data.normalize else lambda x: x logger.info(f'total train samples: {len(train_dataset)}') logger.info(f'total test samples: {len(test_dataset)}') ''' setup score networks ''' POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) elif config.model.type == 'TimeMLPs':
try: except ImportError as e: try: except ImportError as e: print('Tensorboard is not Installed') FLAGS = flags.FLAGS config_flags.DEFINE_config_file( "config", None, "Training configuration.", lock_config=False) flags.mark_flags_as_required(["config"]) # global device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") bg_img = np.ones([512, 384, 3]) * 255 # background canvas focal = [1500, 1500] princpt = [200, 192] def parse_args(argv): parser = argparse_flags.ArgumentParser(description='train diffusion model') parser.add_argument('--dataset-folder', type=str, default='./data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='path of SMPLX model [for visual validation]') parser.add_argument('--restore-dir', type=str, help='resume training') parser.add_argument('--shape', type=bool, default=False, help='handle human shapes (have not been tested)') parser.add_argument('--sample', type=int, help='sample trainset to reduce data') parser.add_argument('--task', type=str, default=None, help='for validating') parser.add_argument('--name', type=str, default='', help='name of checkpoint folder') args = parser.parse_args(argv[1:]) return args def get_dataloader(root_path='', subset='train', version='', sample_interval=None, rot_rep='rot6d', return_shape=False, normalize=True, min_max=True): dataset = AMASSDataset(root_path=root_path, version=version, subset=subset, sample_interval=sample_interval, rot_rep=rot_rep, return_shape=return_shape, normalize=normalize, min_max=min_max) print('AMASS version: {}, rot_rep: {}, normalize: {}'.format(version, rot_rep, normalize)) # drop the last batch to ensure that body model can work all the time if subset == 'train': dataloader = DataLoader(dataset, batch_size=FLAGS.config.training.batch_size, shuffle=True, num_workers=4, pin_memory=False, drop_last=True) else: dataloader = DataLoader(dataset, batch_size=FLAGS.config.eval.batch_size, shuffle=False, num_workers=4, pin_memory=False, drop_last=True) return dataloader, dataset def main(args): def log_metrics(metrics, step, config, logger): log_freq = config.training.log_freq msg = f'Iter: [{step}/{num_train_steps}, {step / num_train_steps * 100:.2f}%][{idx}/{len(train_loader)}],\t' for key, value in metrics.items(): metrics[key] /= log_freq msg += f"{key}: {metrics[key]:.6f},\t" logger.info(msg) metrics = {key: 0.0 for key in metrics} return metrics def log_eval_metrics(metrics, step, writer): for key, value in metrics.items(): avg_value = np.mean(value).item() writer.add_scalar(f'eval_{key}', avg_value, step) metrics[key] = [] # Reset for the next evaluation # args = parse_args() config = FLAGS.config logger, final_output_dir, tb_log_dir = create_logger( config, 'train', folder_name=args.name) if config.training.render: obj_dir = Path(final_output_dir) / 'obj_results' render_dir = Path(final_output_dir) / 'render_results' if not obj_dir.exists(): print('=> creating {}'.format(obj_dir)) obj_dir.mkdir() if not render_dir.exists(): print('=> creating {}'.format(render_dir)) render_dir.mkdir() logger.info(pprint.pformat(config)) logger.info(pprint.pformat(args)) writer = SummaryWriter(tb_log_dir) ''' setup body model for val''' body_model_vis = BodyModel(bm_path=args.bodymodel_path, num_betas=10, batch_size=50, model_type='smplx').to(device) ''' setup datasets, 
dataloaders''' if args.sample: logger.info(f'sample trainset every {args.sample} frame') train_loader, train_dataset = get_dataloader(args.dataset_folder, 'train', args.version, args.sample, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) test_loader, test_dataset = get_dataloader(args.dataset_folder, 'test', args.version, 100, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) # always sample testset to save time denormalize_data = train_dataset.Denormalize if config.data.normalize else lambda x: x logger.info(f'total train samples: {len(train_dataset)}') logger.info(f'total test samples: {len(test_dataset)}') ''' setup score networks ''' POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) elif config.model.type == 'TimeMLPs':
model = TimeMLPs(
6
2023-11-29 15:55:50+00:00
16k
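The ExponentialMovingAverage class reproduced in this row's context documents its own calling pattern: update after every optimizer step, store and copy_to to evaluate with the shadow weights, restore to resume training. A minimal usage sketch of that pattern follows, assuming the class is importable as in the row's import list; the toy model, optimizer, and training loop are hypothetical and only illustrate the call order.

import torch
from lib.algorithms.ema import ExponentialMovingAverage  # import path as listed in the row above

# Hypothetical toy model and optimizer; only the EMA call pattern comes from the class shown above.
model = torch.nn.Linear(16, 16)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for _ in range(100):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 16)).pow(2).mean()
    loss.backward()
    optimizer.step()
    ema.update(model.parameters())  # "call this every time the parameters are updated"

# Evaluate with the EMA weights without losing the raw training weights.
ema.store(model.parameters())
ema.copy_to(model.parameters())
# ... run validation here ...
ema.restore(model.parameters())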
KylinYee/R2-Talker-code
test.py
[ { "identifier": "NeRFDataset_Test", "path": "nerf/provider.py", "snippet": "class NeRFDataset_Test:\n def __init__(self, opt, device, downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.downscale = downscale\n self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.\n self.offset = opt.offset # camera offset\n self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.\n self.fp16 = opt.fp16\n\n self.start_index = opt.data_range[0]\n self.end_index = opt.data_range[1]\n\n self.training = False\n self.num_rays = -1\n\n # load nerf-compatible format data.\n \n with open(opt.pose, 'r') as f:\n transform = json.load(f)\n\n # load image size\n self.H = int(transform['cy']) * 2 // downscale\n self.W = int(transform['cx']) * 2 // downscale\n \n # read images\n frames = transform[\"frames\"]\n\n # use a slice of the dataset\n if self.end_index == -1: # abuse...\n self.end_index = len(frames)\n\n frames = frames[self.start_index:self.end_index]\n\n print(f'[INFO] load {len(frames)} frames.')\n\n # only load pre-calculated aud features when not live-streaming\n if not self.opt.asr:\n\n aud_features = np.load(self.opt.aud)\n\n if self.opt.cond_type == 'idexp':\n aud_features = aud_features.reshape(-1, 68, 3)\n aud_features = torch.from_numpy(aud_features)\n\n idexp_lm3d_mean = aud_features.mean(axis=0).reshape([1,68,3])\n idexp_lm3d_std = aud_features.std(axis=0).reshape([1,68,3])\n idexp_lm3d_normalized = (aud_features.reshape([-1,68,3]) - idexp_lm3d_mean)/idexp_lm3d_std\n\n # step1. clamp the lm3d, to regularize apparent outliers\n lm3d_clamp_std = 2.3 # typically 1.~5., reduce it when blurry or bad cases occurs\n idexp_lm3d_normalized[:,0:17] = torch.clamp(idexp_lm3d_normalized[:,0:17], -lm3d_clamp_std, lm3d_clamp_std) # yaw_x_y_z\n idexp_lm3d_normalized[:,17:27,0:2] = torch.clamp(idexp_lm3d_normalized[:,17:27,0:2], -lm3d_clamp_std/2, lm3d_clamp_std/2) # brow_x_y\n idexp_lm3d_normalized[:,17:27,2] = torch.clamp(idexp_lm3d_normalized[:,17:27,2], -lm3d_clamp_std, lm3d_clamp_std) # brow_z\n idexp_lm3d_normalized[:,27:36] = torch.clamp(idexp_lm3d_normalized[:,27:36], -lm3d_clamp_std, lm3d_clamp_std) # nose\n idexp_lm3d_normalized[:,36:48,0:2] = torch.clamp(idexp_lm3d_normalized[:,36:48,0:2], -lm3d_clamp_std/2, lm3d_clamp_std/2) # eye_x_y\n idexp_lm3d_normalized[:,36:48,2] = torch.clamp(idexp_lm3d_normalized[:,36:48,2], -lm3d_clamp_std, lm3d_clamp_std) # eye_z\n idexp_lm3d_normalized[:,48:68] = torch.clamp(idexp_lm3d_normalized[:,48:68], -lm3d_clamp_std, lm3d_clamp_std) # mouth\n\n aud_features = idexp_lm3d_normalized*idexp_lm3d_std + idexp_lm3d_mean\n\n\n # _lambda_other = 0.4\n # _lambda_lip = 0.2\n # moving_lm = aud_features[0].clone()\n # print(aud_features[0,:48].shape)\n # for i in range(aud_features.size()[0]):\n # aud_features[i,0:17] = 2.0*_lambda_other * moving_lm[0:17] + (1 - 2.0*_lambda_other) * aud_features[i,0:17] # yaw\n # aud_features[i,17:27] = 2.0*_lambda_other * moving_lm[17:27] + (1 - 2.0*_lambda_other) * aud_features[i,17:27] # brow\n # aud_features[i,27:36] = 2.0*_lambda_other * moving_lm[27:36] + (1 - 2.0*_lambda_other) * aud_features[i,27:36] # nose\n # aud_features[i,36:48] = _lambda_other * moving_lm[36:48] + (1 - _lambda_other) * aud_features[i,36:48] # eye\n # aud_features[i,:48] = moving_lm[:48]\n # aud_features[i,48:68] = _lambda_lip * moving_lm[48:68] + (1 - _lambda_lip) * aud_features[i,48:68]\n else:\n aud_features = torch.from_numpy(aud_features)\n\n 
aud_features = aud_features.reshape(-1, 68, 3)\n\n if self.opt.method == 'genefaceDagger':\n video_idexp_lm3d_mean = aud_features.mean(axis=0).reshape([1,68,3])\n video_idexp_lm3d_std = aud_features.std(axis=0).reshape([1,68,3])\n aud_features = (aud_features - video_idexp_lm3d_mean) / video_idexp_lm3d_std\n\n # support both [N, 16] labels and [N, 16, K] logits\n if len(aud_features.shape) == 3:\n # if self.opt.cond_type in ['eo','ds']:\n # aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16] \n \n\n if self.opt.emb:\n print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')\n aud_features = aud_features.argmax(1) # [N, 16]\n \n else:\n assert self.opt.emb, \"aud only provide labels, must use --emb\"\n aud_features = aud_features.long()\n\n print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')\n\n self.poses = []\n self.auds = []\n self.eye_area = []\n\n for f in tqdm.tqdm(frames, desc=f'Loading data'):\n \n pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]\n pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)\n self.poses.append(pose)\n\n # find the corresponding audio to the image frame\n if not self.opt.asr and self.opt.aud == '':\n aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...\n self.auds.append(aud)\n\n if self.opt.exp_eye:\n \n if 'eye_ratio' in f:\n area = f['eye_ratio']\n else:\n area = 0.25 # default value for opened eye\n \n self.eye_area.append(area)\n \n # load pre-extracted background image (should be the same size as training image...)\n\n if self.opt.bg_img == 'white': # special\n bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)\n elif self.opt.bg_img == 'black': # special\n bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)\n else: # load from file\n bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]\n if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:\n bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)\n bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)\n bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.bg_img = bg_img\n\n self.poses = np.stack(self.poses, axis=0)\n\n # smooth camera path...\n if self.opt.smooth_path:\n self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)\n \n self.poses = torch.from_numpy(self.poses) # [N, 4, 4]\n \n if self.opt.asr:\n # live streaming, no pre-calculated auds\n self.auds = None\n else:\n # auds corresponding to images\n if self.opt.aud == '':\n self.auds = torch.stack(self.auds, dim=0) # eo: [N, 32, 16], idexp_lm3ds: [N, 68, 3]\n # auds is novel, may have a different length with images\n else:\n self.auds = aud_features\n \n self.bg_img = torch.from_numpy(self.bg_img)\n\n if self.opt.exp_eye:\n self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]\n print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')\n\n if self.opt.smooth_eye:\n\n # naive 5 window average\n ori_eye = self.eye_area.copy()\n for i in range(ori_eye.shape[0]):\n start = max(0, i - 1)\n end = min(ori_eye.shape[0], i + 2)\n self.eye_area[i] = ori_eye[start:end].mean()\n\n self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]\n\n # always preload\n self.poses = self.poses.to(self.device)\n\n if self.auds is not None:\n self.auds = self.auds.to(self.device)\n\n self.bg_img = self.bg_img.to(torch.half).to(self.device)\n \n if self.opt.exp_eye:\n self.eye_area = 
self.eye_area.to(self.device)\n\n # load intrinsics\n \n fl_x = fl_y = transform['focal_len']\n\n cx = (transform['cx'] / downscale)\n cy = (transform['cy'] / downscale)\n\n self.intrinsics = np.array([fl_x, fl_y, cx, cy])\n\n # directly build the coordinate meshgrid in [-1, 1]^2\n self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]\n \n def mirror_index(self, index):\n size = self.poses.shape[0]\n turn = index // size\n res = index % size\n if turn % 2 == 0:\n return res\n else:\n return size - res - 1\n\n def collate(self, index):\n\n B = len(index) # a list of length 1\n # assert B == 1\n\n results = {}\n\n # audio use the original index\n if self.auds is not None:\n if self.opt.cond_type == 'idexp':\n auds = get_audio_features(self.auds, self.opt.att, index[0], smooth_win_size=5).to(self.device)\n else:\n auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device)\n \n results['auds'] = auds\n\n # head pose and bg image may mirror (replay --> <-- --> <--).\n index[0] = self.mirror_index(index[0])\n\n poses = self.poses[index].to(self.device) # [B, 4, 4]\n \n rays = get_rays(poses, self.intrinsics, self.H, self.W, self.num_rays, self.opt.patch_size)\n\n results['index'] = index # for ind. code\n results['H'] = self.H\n results['W'] = self.W\n results['rays_o'] = rays['rays_o']\n results['rays_d'] = rays['rays_d']\n\n if self.opt.exp_eye:\n results['eye'] = self.eye_area[index].to(self.device) # [1]\n else:\n results['eye'] = None\n\n bg_img = self.bg_img.view(1, -1, 3).repeat(B, 1, 1).to(self.device)\n\n results['bg_color'] = bg_img\n\n bg_coords = self.bg_coords # [1, N, 2]\n results['bg_coords'] = bg_coords\n\n results['poses'] = convert_poses(poses) # [B, 6]\n results['poses_matrix'] = poses # [B, 4, 4]\n \n return results\n\n def dataloader(self):\n\n \n # test with novel auds, then use its length\n if self.auds is not None:\n size = self.auds.shape[0]\n # live stream test, use 2 * len(poses), so it naturally mirrors.\n else:\n size = 2 * self.poses.shape[0]\n\n loader = DataLoader(list(range(size)), batch_size=1, collate_fn=self.collate, shuffle=False, num_workers=0)\n loader._data = self # an ugly fix... 
we need poses in trainer.\n\n # do evaluate if has gt images and use self-driven setting\n loader.has_gt = False\n\n return loader" }, { "identifier": "NeRFGUI", "path": "nerf/gui.py", "snippet": "class NeRFGUI:\n def __init__(self, opt, trainer, data_loader, debug=True):\n self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.\n self.W = opt.W\n self.H = opt.H\n self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)\n self.debug = debug\n self.training = False\n self.step = 0 # training step \n\n self.trainer = trainer\n self.data_loader = data_loader\n\n # override with dataloader's intrinsics\n self.W = data_loader._data.W\n self.H = data_loader._data.H\n self.cam.update_intrinsics(data_loader._data.intrinsics)\n\n # use dataloader's pose\n pose_init = data_loader._data.poses[0]\n self.cam.update_pose(pose_init.detach().cpu().numpy())\n\n # use dataloader's bg\n bg_img = data_loader._data.bg_img #.view(1, -1, 3)\n if self.H != bg_img.shape[0] or self.W != bg_img.shape[1]:\n bg_img = F.interpolate(bg_img.permute(2, 0, 1).unsqueeze(0).contiguous(), (self.H, self.W), mode='bilinear').squeeze(0).permute(1, 2, 0).contiguous()\n self.bg_color = bg_img.view(1, -1, 3)\n\n # audio features (from dataloader, only used in non-playing mode)\n self.audio_features = data_loader._data.auds # [N, 29, 16]\n self.audio_idx = 0\n\n # control eye\n self.eye_area = None if not self.opt.exp_eye else data_loader._data.eye_area.mean().item()\n\n # playing seq from dataloader, or pause.\n self.playing = False\n self.loader = iter(data_loader)\n\n self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32)\n self.need_update = True # camera moved, should reset accumulation\n self.spp = 1 # sample per pixel\n self.mode = 'image' # choose from ['image', 'depth']\n\n self.dynamic_resolution = False # assert False!\n self.downscale = 1\n self.train_steps = 16\n\n self.ind_index = 0\n self.ind_num = trainer.model.individual_codes.shape[0]\n\n # build asr\n if self.opt.asr:\n self.asr = ASR(opt)\n \n dpg.create_context()\n self.register_dpg()\n self.test_step()\n \n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.opt.asr:\n self.asr.stop() \n dpg.destroy_context()\n\n def train_step(self):\n\n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n outputs = self.trainer.train_gui(self.data_loader, step=self.train_steps)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n self.step += self.train_steps\n self.need_update = True\n\n dpg.set_value(\"_log_train_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_train_log\", f'step = {self.step: 5d} (+{self.train_steps: 2d}), loss = {outputs[\"loss\"]:.4f}, lr = {outputs[\"lr\"]:.5f}')\n\n # dynamic train steps\n # max allowed train time per-frame is 500 ms\n full_t = t / self.train_steps * 16\n train_steps = min(16, max(4, int(16 * 500 / full_t)))\n if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:\n self.train_steps = train_steps\n\n def prepare_buffer(self, outputs):\n if self.mode == 'image':\n return outputs['image']\n else:\n return np.expand_dims(outputs['depth'], -1).repeat(3, -1)\n\n def test_step(self):\n\n if self.need_update or self.spp < self.opt.max_spp:\n \n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n if self.playing:\n try:\n data 
= next(self.loader)\n except StopIteration:\n self.loader = iter(self.data_loader)\n data = next(self.loader)\n \n if self.opt.asr:\n # use the live audio stream\n data['auds'] = self.asr.get_next_feat()\n\n outputs = self.trainer.test_gui_with_data(data, self.W, self.H)\n\n # sync local camera pose\n self.cam.update_pose(data['poses_matrix'][0].detach().cpu().numpy())\n \n else:\n if self.audio_features is not None:\n auds = get_audio_features(self.audio_features, self.opt.att, self.audio_idx)\n else:\n auds = None\n outputs = self.trainer.test_gui(self.cam.pose, self.cam.intrinsics, self.W, self.H, auds, self.eye_area, self.ind_index, self.bg_color, self.spp, self.downscale)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n # update dynamic resolution\n if self.dynamic_resolution:\n # max allowed infer time per-frame is 200 ms\n full_t = t / (self.downscale ** 2)\n downscale = min(1, max(1/4, math.sqrt(200 / full_t)))\n if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8:\n self.downscale = downscale\n\n if self.need_update:\n self.render_buffer = self.prepare_buffer(outputs)\n self.spp = 1\n self.need_update = False\n else:\n self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1)\n self.spp += 1\n \n if self.playing:\n self.need_update = True\n\n dpg.set_value(\"_log_infer_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_resolution\", f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}')\n dpg.set_value(\"_log_spp\", self.spp)\n dpg.set_value(\"_texture\", self.render_buffer)\n\n \n def register_dpg(self):\n\n ### register texture \n\n with dpg.texture_registry(show=False):\n dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag=\"_texture\")\n\n ### register window\n\n # the rendered image, as the primary window\n with dpg.window(tag=\"_primary_window\", width=self.W, height=self.H):\n\n # add the texture\n dpg.add_image(\"_texture\")\n\n # dpg.set_primary_window(\"_primary_window\", True)\n\n dpg.show_tool(dpg.mvTool_Metrics)\n\n # control window\n with dpg.window(label=\"Control\", tag=\"_control_window\", width=400, height=300):\n\n # button theme\n with dpg.theme() as theme_button:\n with dpg.theme_component(dpg.mvButton):\n dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))\n dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)\n\n # time\n if not self.opt.test:\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train time: \")\n dpg.add_text(\"no data\", tag=\"_log_train_time\") \n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Infer time: \")\n dpg.add_text(\"no data\", tag=\"_log_infer_time\")\n \n with dpg.group(horizontal=True):\n dpg.add_text(\"SPP: \")\n dpg.add_text(\"1\", tag=\"_log_spp\")\n\n # train button\n if not self.opt.test:\n with dpg.collapsing_header(label=\"Train\", default_open=True):\n\n # train / stop\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train: \")\n\n def callback_train(sender, app_data):\n if self.training:\n self.training = False\n dpg.configure_item(\"_button_train\", label=\"start\")\n else:\n self.training = True\n dpg.configure_item(\"_button_train\", label=\"stop\")\n\n dpg.add_button(label=\"start\", tag=\"_button_train\", callback=callback_train)\n 
dpg.bind_item_theme(\"_button_train\", theme_button)\n\n def callback_reset(sender, app_data):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n self.trainer.model.apply(fn=weight_reset)\n self.trainer.model.reset_extra_state() # for cuda_ray density_grid and step_counter\n self.need_update = True\n\n dpg.add_button(label=\"reset\", tag=\"_button_reset\", callback=callback_reset)\n dpg.bind_item_theme(\"_button_reset\", theme_button)\n\n # save ckpt\n with dpg.group(horizontal=True):\n dpg.add_text(\"Checkpoint: \")\n\n def callback_save(sender, app_data):\n self.trainer.save_checkpoint(full=True, best=False)\n dpg.set_value(\"_log_ckpt\", \"saved \" + os.path.basename(self.trainer.stats[\"checkpoints\"][-1]))\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"save\", tag=\"_button_save\", callback=callback_save)\n dpg.bind_item_theme(\"_button_save\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_ckpt\")\n \n # save mesh\n with dpg.group(horizontal=True):\n dpg.add_text(\"Marching Cubes: \")\n\n def callback_mesh(sender, app_data):\n self.trainer.save_mesh(resolution=256, threshold=10)\n dpg.set_value(\"_log_mesh\", \"saved \" + f'{self.trainer.name}_{self.trainer.epoch}.ply')\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"mesh\", tag=\"_button_mesh\", callback=callback_mesh)\n dpg.bind_item_theme(\"_button_mesh\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_mesh\")\n\n with dpg.group(horizontal=True):\n dpg.add_text(\"\", tag=\"_log_train_log\")\n\n \n # rendering options\n with dpg.collapsing_header(label=\"Options\", default_open=True):\n \n # playing\n with dpg.group(horizontal=True):\n dpg.add_text(\"Play: \")\n\n def callback_play(sender, app_data):\n \n if self.playing:\n self.playing = False\n dpg.configure_item(\"_button_play\", label=\"start\")\n else:\n self.playing = True\n dpg.configure_item(\"_button_play\", label=\"stop\")\n if self.opt.asr:\n self.asr.warm_up()\n self.need_update = True\n\n dpg.add_button(label=\"start\", tag=\"_button_play\", callback=callback_play)\n dpg.bind_item_theme(\"_button_play\", theme_button)\n\n # set asr\n if self.opt.asr:\n\n # clear queue button\n def callback_clear_queue(sender, app_data):\n \n self.asr.clear_queue()\n self.need_update = True\n\n dpg.add_button(label=\"clear\", tag=\"_button_clear_queue\", callback=callback_clear_queue)\n dpg.bind_item_theme(\"_button_clear_queue\", theme_button)\n\n # dynamic rendering resolution\n with dpg.group(horizontal=True):\n\n def callback_set_dynamic_resolution(sender, app_data):\n if self.dynamic_resolution:\n self.dynamic_resolution = False\n self.downscale = 1\n else:\n self.dynamic_resolution = True\n self.need_update = True\n\n # Disable dynamic resolution for face.\n # dpg.add_checkbox(label=\"dynamic resolution\", default_value=self.dynamic_resolution, callback=callback_set_dynamic_resolution)\n dpg.add_text(f\"{self.W}x{self.H}\", tag=\"_log_resolution\")\n\n # mode combo\n def callback_change_mode(sender, app_data):\n self.mode = app_data\n self.need_update = True\n \n dpg.add_combo(('image', 'depth'), label='mode', default_value=self.mode, callback=callback_change_mode)\n\n\n # bg_color picker\n def callback_change_bg(sender, app_data):\n self.bg_color = torch.tensor(app_data[:3], dtype=torch.float32) # only need RGB in [0, 1]\n self.need_update = True\n\n 
dpg.add_color_edit((255, 255, 255), label=\"Background Color\", width=200, tag=\"_color_editor\", no_alpha=True, callback=callback_change_bg)\n\n # audio index slider\n if not self.opt.asr:\n def callback_set_audio_index(sender, app_data):\n self.audio_idx = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Audio\", min_value=0, max_value=self.audio_features.shape[0] - 1, format=\"%d\", default_value=self.audio_idx, callback=callback_set_audio_index)\n\n # ind code index slider\n if self.opt.ind_dim > 0:\n def callback_set_individual_code(sender, app_data):\n self.ind_index = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Individual\", min_value=0, max_value=self.ind_num - 1, format=\"%d\", default_value=self.ind_index, callback=callback_set_individual_code)\n\n # eye area slider\n if self.opt.exp_eye:\n def callback_set_eye(sender, app_data):\n self.eye_area = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"eye area\", min_value=0, max_value=0.5, format=\"%.2f percent\", default_value=self.eye_area, callback=callback_set_eye)\n\n # fov slider\n def callback_set_fovy(sender, app_data):\n self.cam.fovy = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"FoV (vertical)\", min_value=1, max_value=120, format=\"%d deg\", default_value=self.cam.fovy, callback=callback_set_fovy)\n\n # dt_gamma slider\n def callback_set_dt_gamma(sender, app_data):\n self.opt.dt_gamma = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"dt_gamma\", min_value=0, max_value=0.1, format=\"%.5f\", default_value=self.opt.dt_gamma, callback=callback_set_dt_gamma)\n\n # max_steps slider\n def callback_set_max_steps(sender, app_data):\n self.opt.max_steps = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"max steps\", min_value=1, max_value=1024, format=\"%d\", default_value=self.opt.max_steps, callback=callback_set_max_steps)\n\n # aabb slider\n def callback_set_aabb(sender, app_data, user_data):\n # user_data is the dimension for aabb (xmin, ymin, zmin, xmax, ymax, zmax)\n self.trainer.model.aabb_infer[user_data] = app_data\n\n # also change train aabb ? 
[better not...]\n #self.trainer.model.aabb_train[user_data] = app_data\n\n self.need_update = True\n\n dpg.add_separator()\n dpg.add_text(\"Axis-aligned bounding box:\")\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"x\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=0)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=3)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"y\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=1)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=4)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"z\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=2)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=5)\n \n\n # debug info\n if self.debug:\n with dpg.collapsing_header(label=\"Debug\"):\n # pose\n dpg.add_separator()\n dpg.add_text(\"Camera Pose:\")\n dpg.add_text(str(self.cam.pose), tag=\"_log_pose\")\n\n\n ### register camera handler\n\n def callback_camera_drag_rotate(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.orbit(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_wheel_scale(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n delta = app_data\n\n self.cam.scale(delta)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_drag_pan(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.pan(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n with dpg.handler_registry():\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate)\n dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan)\n\n \n dpg.create_viewport(title='RAD-NeRF', width=1080, height=720, resizable=True)\n\n ### global theme\n with dpg.theme() as theme_no_padding:\n with dpg.theme_component(dpg.mvAll):\n # set all padding to 0 to avoid scroll bar\n dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core)\n \n dpg.bind_item_theme(\"_primary_window\", theme_no_padding)\n\n dpg.setup_dearpygui()\n\n #dpg.show_metrics()\n\n dpg.show_viewport()\n\n\n def render(self):\n\n while dpg.is_dearpygui_running():\n # update texture every frame\n if self.training:\n self.train_step()\n # audio stream thread...\n if self.opt.asr and self.playing:\n # run 2 ASR steps (audio is at 50FPS, video is 
at 25FPS)\n for _ in range(2):\n self.asr.run_step()\n self.test_step()\n dpg.render_dearpygui_frame()" } ]
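The NeRFDataset_Test entry above drives replay by iterating over 2 * len(poses) indices and folding them with mirror_index, so the head-pose sequence plays forward and then backward instead of snapping back to frame 0. A minimal standalone sketch of that ping-pong indexing, lifted out of the class for clarity (same logic as the method shown in the snippet):

def mirror_index(index: int, size: int) -> int:
    # Fold a growing index into a forward/backward sweep over [0, size):
    # 0,1,...,size-1,size-1,...,1,0,0,1,...  (ping-pong replay)
    turn = index // size   # which pass over the sequence we are on
    res = index % size     # position within the current pass
    return res if turn % 2 == 0 else size - res - 1

# e.g. with 4 poses, playback indices 0..7 map to 0,1,2,3,3,2,1,0
assert [mirror_index(i, 4) for i in range(8)] == [0, 1, 2, 3, 3, 2, 1, 0]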
import torch import argparse from nerf.provider import NeRFDataset_Test from nerf.gui import NeRFGUI from nerf.utils import * from nerf.network import NeRFNetwork, R2TalkerNeRF, GeneNeRFNetwork
10823
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") # parser.add_argument('--preload', action='store_true', help="preload all data into GPU, accelerate training but use more GPU memory") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") 
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' # assert test mode opt.test = True opt.test_train = False # explicit smoothing opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True assert opt.pose != '', 'Must provide a pose source' assert opt.aud != '', 'Must provide an audio source' if opt.O: opt.fp16 = True opt.exp_eye = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # print(model) trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, fp16=opt.fp16, metrics=[], use_checkpoint=opt.ckpt) test_loader = NeRFDataset_Test(opt, device=device).dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
# torch.autograd.set_detect_anomaly(True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pose', type=str, help="transforms.json, pose source") parser.add_argument('--aud', type=str, default=None, help="aud.npy, audio source") parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp") parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf") parser.add_argument('--bg_img', type=str, default='white', help="bg.jpg, background image source") parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") # parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") # parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options # parser.add_argument('--iters', type=int, default=200000, help="training iters") # parser.add_argument('--lr', type=float, default=5e-3, help="initial learning rate") # parser.add_argument('--lr_net', type=float, default=5e-4, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") # parser.add_argument('--preload', action='store_true', help="preload all data into GPU, accelerate training but use more GPU memory") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") 
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS 
parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' # assert test mode opt.test = True opt.test_train = False # explicit smoothing opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True assert opt.pose != '', 'Must provide a pose source' assert opt.aud != '', 'Must provide an audio source' if opt.O: opt.fp16 = True opt.exp_eye = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # print(model) trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, fp16=opt.fp16, metrics=[], use_checkpoint=opt.ckpt) test_loader = NeRFDataset_Test(opt, device=device).dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
with NeRFGUI(opt, trainer, test_loader) as gui:
1
2023-12-04 12:51:59+00:00
16k
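For this record, gold_snippet_index 1 points at the second context entry above (NeRFGUI), which is exactly what the ground-truth next_line instantiates. Purely as an illustration of how that branch would typically use the class, based only on methods visible in the snippet (the file's real continuation is not part of this record), a hedged sketch:

# Hypothetical illustration, not the record's ground-truth continuation.
if opt.gui:
    # test_loader still supplies audio features and intrinsics to the GUI
    with NeRFGUI(opt, trainer, test_loader) as gui:
        gui.render()  # NeRFGUI.render() runs the dearpygui event loop (see snippet)
else:
    # a headless run would presumably hand the loader back to the trainer;
    # the exact method name here is an assumption, not shown in this record
    trainer.test(test_loader)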
ubc-vision/vivid123
vivid123/generation_utils.py
[ { "identifier": "CLIPCameraProjection", "path": "vivid123/models/clip_camera_projection.py", "snippet": "class CLIPCameraProjection(ModelMixin, ConfigMixin):\n \"\"\"\n A Projection layer for CLIP embedding and camera embedding.\n Parameters:\n embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `clip_embed`\n additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the\n projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +\n additional_embeddings`.\n \"\"\"\n\n @register_to_config\n def __init__(self, embedding_dim: int = 768, additional_embeddings: int = 4):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.additional_embeddings = additional_embeddings\n\n self.input_dim = self.embedding_dim + self.additional_embeddings\n self.output_dim = self.embedding_dim\n\n self.proj = torch.nn.Linear(self.input_dim, self.output_dim)\n\n def forward(\n self,\n embedding: torch.FloatTensor,\n ):\n \"\"\"\n The [`PriorTransformer`] forward method.\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, input_dim)`):\n The currently input embeddings.\n Returns:\n The output embedding projection (`torch.FloatTensor` of shape `(batch_size, output_dim)`).\n \"\"\"\n proj_embedding = self.proj(embedding)\n return proj_embedding" }, { "identifier": "ViVid123Pipeline", "path": "vivid123/pipelines/vivid123_pipeline.py", "snippet": "class ViVid123Pipeline(TextToVideoSDPipeline):\n r\"\"\"\n Pipeline for text-to-video generation.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).\n tokenizer (`CLIPTokenizer`):\n A [`~transformers.CLIPTokenizer`] to tokenize text.\n unet ([`UNet3DConditionModel`]):\n A [`UNet3DConditionModel`] to denoise the encoded video latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n novel_view_unet: UNet2DConditionModel,\n image_encoder: CLIPVisionModelWithProjection,\n cc_projection: CLIPCameraProjection,\n ):\n super().__init__(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)\n\n self.register_modules(\n novel_view_unet=novel_view_unet,\n image_encoder=image_encoder,\n cc_projection=cc_projection,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n\n self.image_processor = VaeImageProcessor(\n vae_scale_factor=self.vae_scale_factor,\n do_convert_rgb=True,\n do_normalize=True,\n )\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n num_inference_steps=50,\n fusion_schedule=None,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. 
Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n \n if fusion_schedule is None:\n raise ValueError(\n \"Fusion schedule is not provided.\"\n )\n \n if len(fusion_schedule[0]) != num_inference_steps or len(fusion_schedule[1]) != num_inference_steps:\n raise ValueError(\n \"Fusion schedule length does not match the number of timesteps.\"\n )\n \n def prepare_latents(\n self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None, noise_identical_accross_frames=False\n ):\n shape = (\n batch_size,\n num_channels_latents,\n num_frames if not noise_identical_accross_frames else 1,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n if latents.shape != shape:\n raise ValueError(\n f\"User-prepared `latents` must have shape {shape}, when noise_identical_accross_frames={noise_identical_accross_frames} but got {latents.shape}.\"\n )\n latents = latents.to(device)\n\n if noise_identical_accross_frames:\n latents = latents.repeat(1, 1, num_frames, 1, 1)\n \n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def prepare_img_latents(\n self, image, batch_size, dtype, device, generator=None, do_zero123_classifier_free_guidance=False\n ):\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n if isinstance(image, torch.Tensor):\n # Batch single image\n if image.ndim == 3:\n assert image.shape[0] == 3, \"Image outside a batch should be of shape (3, H, W)\"\n image = image.unsqueeze(0)\n\n assert image.ndim == 4, \"Image must have 4 dimensions\"\n\n # Check image is in [-1, 1]\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"Image should be in [-1, 1] range\")\n else:\n # preprocess image\n if isinstance(image, (PIL.Image.Image, np.ndarray)):\n image = [image]\n\n if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):\n image = [np.array(i.convert(\"RGB\"))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n elif isinstance(image, list) and isinstance(image[0], np.ndarray):\n image = np.concatenate([i[None, :] for i in image], axis=0)\n\n image = image.transpose(0, 3, 1, 2)\n image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0\n\n image = image.to(device=device, dtype=dtype)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.mode()\n\n # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor\n if batch_size > init_latents.shape[0]:\n # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)\n num_images_per_prompt = batch_size // init_latents.shape[0]\n # duplicate image latents for each generation per prompt, using mps friendly method\n bs_embed, emb_c, emb_h, emb_w = init_latents.shape\n init_latents = init_latents.unsqueeze(1)\n init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)\n init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)\n\n # init_latents = torch.cat([init_latents]*2) if do_zero123_classifier_free_guidance else init_latents # follow zero123\n init_latents = (\n torch.cat([torch.zeros_like(init_latents), init_latents])\n if do_zero123_classifier_free_guidance\n else init_latents\n )\n\n init_latents = init_latents.to(device=device, dtype=dtype)\n return init_latents\n\n def CLIP_preprocess(self, x):\n dtype = x.dtype\n # following openai's implementation\n # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741\n # follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608\n if isinstance(x, torch.Tensor):\n if x.min() < -1.0 or x.max() > 1.0:\n raise ValueError(\"Expected input tensor to have values in the range [-1, 1]\")\n x = kornia.geometry.resize(\n x.to(torch.float32), (224, 224), interpolation=\"bicubic\", align_corners=True, antialias=False\n ).to(dtype=dtype)\n x = (x + 1.0) / 2.0\n # renormalize according to clip\n x = kornia.enhance.normalize(\n x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])\n )\n return x\n\n # from stable_diffusion_image_variation\n def _encode_image(self, image, device, num_images_per_prompt, do_video_classifier_free_guidance):\n dtype = next(self.image_encoder.parameters()).dtype\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n if isinstance(image, torch.Tensor):\n # Batch single image\n if image.ndim == 3:\n assert image.shape[0] == 3, \"Image outside a batch should be of shape (3, H, W)\"\n image = image.unsqueeze(0)\n\n assert image.ndim == 4, \"Image must have 4 dimensions\"\n\n # Check image is in [-1, 1]\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"Image should be in [-1, 1] range\")\n else:\n # preprocess image\n if isinstance(image, (PIL.Image.Image, np.ndarray)):\n image = [image]\n\n if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):\n image = [np.array(i.convert(\"RGB\"))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n elif isinstance(image, list) and isinstance(image[0], np.ndarray):\n image = np.concatenate([i[None, :] for i in image], axis=0)\n\n image = image.transpose(0, 3, 1, 2)\n image = 
torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0\n\n image = image.to(device=device, dtype=dtype)\n\n image = self.CLIP_preprocess(image)\n # if not isinstance(image, torch.Tensor):\n # # 0-255\n # print(\"Warning: image is processed by hf's preprocess, which is different from openai original's.\")\n # image = self.feature_extractor(images=image, return_tensors=\"pt\").pixel_values\n image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)\n image_embeddings = image_embeddings.unsqueeze(1)\n\n # duplicate image embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = image_embeddings.shape\n image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)\n image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n if do_video_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(image_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])\n\n return image_embeddings\n\n def _encode_pose(self, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):\n dtype = next(self.cc_projection.parameters()).dtype\n if isinstance(pose, torch.Tensor):\n pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)\n else:\n if isinstance(pose[0], list):\n pose = torch.Tensor(pose)\n else:\n pose = torch.Tensor([pose])\n x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)\n pose_embeddings = (\n torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)\n .unsqueeze(1)\n .to(device=device, dtype=dtype)\n ) # B, 1, 4\n # duplicate pose embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = pose_embeddings.shape\n pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)\n pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n if do_video_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(pose_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])\n return pose_embeddings\n\n def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):\n img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)\n pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)\n prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)\n prompt_embeds = self.cc_projection(prompt_embeds)\n # prompt_embeds = img_prompt_embeds\n # follow 0123, add negative prompt, after projection\n if do_video_classifier_free_guidance:\n negative_prompt = torch.zeros_like(prompt_embeds)\n prompt_embeds = torch.cat([negative_prompt, prompt_embeds])\n return prompt_embeds\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_frames: int = 16,\n num_inference_steps: int = 50,\n guidance_scale_video: float = 9.0,\n negative_prompt: 
Optional[Union[str, List[str]]] = None,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"np\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n # vivid123 params below\n image: Optional[\n Union[\n torch.FloatTensor,\n PIL.Image.Image,\n np.ndarray,\n List[torch.FloatTensor],\n List[PIL.Image.Image],\n List[np.ndarray],\n ]\n ] = None,\n cam_pose_torch: Optional[torch.FloatTensor] = None,\n fusion_schedule: Optional[tuple[float]] = None,\n ddim_eta_0123: float = 1.0,\n guidance_scale_zero123: float = 3.0,\n noise_identical_accross_frames: bool = False,\n ):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated video.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated video.\n num_frames (`int`, *optional*, defaults to 16):\n The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds\n amounts to 2 seconds of video.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality videos at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide what to not include in image generation. If not defined, you need to\n pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies\n to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`. Latents should be of shape\n `(batch_size, num_channel, num_frames, height, width)`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not\n provided, text embeddings are generated from the `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If\n not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.\n output_type (`str`, *optional*, defaults to `\"np\"`):\n The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead\n of a plain tuple.\n callback (`Callable`, *optional*):\n A function that calls every `callback_steps` steps during inference. The function is called with the\n following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function is called. If not specified, the callback is called at\n every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in\n [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n guidance_rescale (`float`, *optional*, defaults to 0.0):\n Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are\n Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when\n using zero terminal SNR.\n guidance_scale_zero123 (`float`, *optional*, defaults to 3.0):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n cam_pose_torch: (`torch.FloatTensor`, *optional*):\n Camera pose in torch tensor, shape (4,). The elements mean (el, sin(az), cos(az), radius)\n fusion_schedule (`tuple[float]`, *optional*):\n Fusion schedule for video diffusion and zero123. The first element is the schedule for video diffusion, and the\n second element is the schedule for zero123. The length of each schedule should be the same as the number\n of timesteps.\n ddim_eta_0123 (`float`, *optional*, defaults to 1.0):\n The eta value for the 0123 diffusion steps. Only applies to the [`~schedulers.DDIMScheduler`], and is\n ignored in other schedulers.\n \n Example:\n \n\n Returns:\n [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is\n returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_videos_per_image_prompt = 1\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n num_inference_steps,\n fusion_schedule\n )\n\n # 2. 
Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_video_classifier_free_guidance = guidance_scale_video > 1.0\n do_zero123_classifier_free_guidance = guidance_scale_zero123 > 1.0\n\n # 3.1 Encode input prompt for video diffusion\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt=prompt,\n device=device,\n # by diffusers v0.23.1, the naming of diffusers.pipelines.TextToVideoSDPipeline is still \"num_images_per_prompt\",\n # where it should be \"num_videos_per_prompt\"\n num_images_per_prompt=num_videos_per_image_prompt,\n do_classifier_free_guidance=do_video_classifier_free_guidance,\n negative_prompt=negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_video_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 3.2 Encode input image for zero123\n zero123_cond_images = [image for _ in range(num_frames)]\n zero123_embeds = self._encode_image_with_pose(\n zero123_cond_images,\n cam_pose_torch,\n device,\n num_videos_per_image_prompt,\n do_zero123_classifier_free_guidance,\n ) # (2xF) x 1 x 768\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_image_prompt,\n num_channels_latents,\n num_frames,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n noise_identical_accross_frames,\n )\n\n # 6. Prepare Zero123 image latents\n img_latents = self.prepare_img_latents(\n zero123_cond_images,\n batch_size=num_frames,\n dtype=zero123_embeds.dtype,\n device=device,\n generator=generator,\n do_zero123_classifier_free_guidance=True,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_video_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual with video diffusion\n noise_pred_video = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform classifier-free guidance for video diffusion\n if do_video_classifier_free_guidance:\n noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2)\n noise_pred_video = noise_pred_video_uncond + guidance_scale_video * (\n noise_pred_video_text - noise_pred_video_uncond\n )\n # if do_video_classifier_free_guidance and guidance_rescale > 0.0:\n # # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n # noise_pred_video = rescale_noise_cfg(\n # noise_pred_video, noise_pred_video_text, guidance_rescale=guidance_rescale\n # )\n\n # zero123 denoising\n latent_model_input_zero123 = torch.cat([latents] * 2) if do_zero123_classifier_free_guidance else latents\n augmented_latent_model_input_zero123 = torch.cat(\n [rearrange(latent_model_input_zero123, \"B C F H W -> (B F) C H W\"), img_latents],\n dim=1,\n ).to(self.novel_view_unet.dtype)\n noise_pred_zero123 = self.novel_view_unet(\n augmented_latent_model_input_zero123,\n t,\n encoder_hidden_states=zero123_embeds,\n return_dict=True,\n ).sample\n noise_pred_zero123 = rearrange(noise_pred_zero123, \"(B F) C H W -> B C F H W\", F=num_frames)\n\n if do_zero123_classifier_free_guidance:\n noise_pred_zero123_uncond, noise_pred_zero123_text = noise_pred_zero123.chunk(2)\n noise_pred_zero123 = noise_pred_zero123_uncond + guidance_scale_zero123 * (\n noise_pred_zero123_text - noise_pred_zero123_uncond\n )\n\n # fusing video diffusion with zero123\n noise_pred = fusion_schedule[0][i] * noise_pred_video + fusion_schedule[1][i] * noise_pred_zero123\n\n # reshape latents\n bsz, channel, frames, width, height = latents.shape\n latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # reshape latents back\n latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if output_type == \"latent\":\n return TextToVideoSDPipelineOutput(frames=latents)\n\n video_tensor = self.decode_latents(latents)\n\n if output_type == \"pt\":\n video = video_tensor\n else:\n video = tensor2vid(video_tensor)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (video,)\n\n return TextToVideoSDPipelineOutput(frames=video)" }, { "identifier": "ViVid123BaseSchema", "path": "vivid123/configs/base_schema.py", "snippet": 
"class ViVid123BaseSchema(BaseModel):\n # Disable aliasing underscore to hyphen\n class Config:\n alias_generator = lambda string: string\n\n num_frames: int = 25\n delta_elevation_start: float = 0.0\n delta_elevation_end: float = 0.0\n delta_azimuth_start: float = -45.0\n delta_azimuth_end: float = 45.0\n delta_radius_start: float = 0.0\n delta_radius_end: float = 0.0\n height: int = 256\n width: int = 256\n # num_videos_per_image_prompt: int = 1 # Only support 1 for running on < 24G memory GPU\n num_inference_steps: int = 50\n guidance_scale_zero123: float = 3.0\n guidance_scale_video: float = 1.0\n eta: float = 1.0\n noise_identical_accross_frames: bool = False\n prompt: str = \"\"\n\n video_linear_start_weight: float = 1.0\n video_linear_end_weight: float = 0.5\n video_start_step_percentage: float = 0.0\n video_end_step_percentage: float = 1.0\n zero123_linear_start_weight: float = 1.0\n zero123_linear_end_weight: float = 1.0\n zero123_start_step_percentage: float = 0.0\n zero123_end_step_percentage: float = 1.0\n\n refiner_strength: float = 0.3\n refiner_guidance_scale: float = 12.0\n\n name: str = \"new_balance_used\"\n input_image_path: str = \"tmp/new_balance_used/012.png\"" } ]
import os
import yaml
import re
import torch
import numpy as np
import imageio.v3 as imageio
from typing import List, Any
from yaml.parser import ParserError
from PIL import Image
from diffusers.pipelines import DiffusionPipeline
from diffusers.models import UNet2DConditionModel, AutoencoderKL
from diffusers.schedulers import DPMSolverMultistepScheduler, EulerDiscreteScheduler
from transformers import CLIPVisionModelWithProjection
from .models import CLIPCameraProjection
from .pipelines import ViVid123Pipeline
from .configs import ViVid123BaseSchema
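Aside: a tiny numeric illustration (not from the repo) of the classifier-free-guidance combination the pipeline snippet applies to both the video and the zero123 branch, prediction = uncond + w * (cond - uncond); w = 1 reproduces the conditional prediction and larger w extrapolates further away from the unconditional one:

import torch

uncond = torch.tensor([0.10, 0.20])
cond = torch.tensor([0.30, 0.25])
for w in (1.0, 3.0, 7.5):
    guided = uncond + w * (cond - uncond)
    print(w, guided.tolist())
# w=1.0 -> cond itself; w=3.0 -> ~[0.70, 0.35]; w=7.5 -> ~[1.60, 0.48]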
11,310
assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet") zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection") zero123_img_enc = CLIPVisionModelWithProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="image_encoder") vivid123_pipe = ViVid123Pipeline.from_pretrained( VIDEO_MODEL_ID, # torch_dtype=torch.float16, novel_view_unet=zero123_unet, image_encoder=zero123_img_enc, cc_projection=zero123_cam_proj, ) vivid123_pipe.scheduler = DPMSolverMultistepScheduler.from_config(vivid123_pipe.scheduler.config) # vivid123_pipe.to("cuda") vivid123_pipe.enable_model_cpu_offload() xl_pipe = DiffusionPipeline.from_pretrained(VIDEO_XL_MODEL_ID, torch_dtype=torch.float16) xl_pipe.scheduler = 
DPMSolverMultistepScheduler.from_config(xl_pipe.scheduler.config) # xl_pipe.to("cuda") xl_pipe.enable_model_cpu_offload() return vivid123_pipe, xl_pipe def generation_vivid123( vivid123_pipe: ViVid123Pipeline, xl_pipe: DiffusionPipeline, config_path: str, output_root_dir: str, ): # loading yaml config _var_matcher = re.compile(r"\${([^}^{]+)}") _tag_matcher = re.compile(r"[^$]*\${([^}^{]+)}.*") def _path_constructor(_loader: Any, node: Any): def replace_fn(match): envparts = f"{match.group(1)}:".split(":") return os.environ.get(envparts[0], envparts[1]) return _var_matcher.sub(replace_fn, node.value) def load_yaml(filename: str) -> dict: yaml.add_implicit_resolver("!envvar", _tag_matcher, None, yaml.SafeLoader) yaml.add_constructor("!envvar", _path_constructor, yaml.SafeLoader) try: with open(filename, "r") as f: return yaml.safe_load(f.read()) except (FileNotFoundError, PermissionError, ParserError): return dict() yaml_loaded = load_yaml(config_path)
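Aside: the fusion schedule built above reduces to two torch.linspace ramps padded with constant segments. An illustrative re-derivation (not the repo's exact function; the int casts on the padding lengths are added here) for a small step count:

import torch

def linear_schedule(start_w, end_w, start_pct, end_pct, num_steps):
    # linear ramp over the chosen fraction of steps, held constant outside it
    ramp = torch.linspace(start_w, end_w, steps=int((end_pct - start_pct) * num_steps))
    head = start_w * torch.ones(int(start_pct * num_steps))
    tail = end_w * torch.ones(num_steps - int(end_pct * num_steps))
    return torch.cat([head, ramp, tail])

video_schedule = linear_schedule(1.0, 0.5, 0.0, 1.0, num_steps=10)    # 1.00 ... 0.50
zero123_schedule = linear_schedule(1.0, 1.0, 0.0, 1.0, num_steps=10)  # all ones
print(video_schedule.tolist())
print(zero123_schedule.tolist())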
def prepare_cam_pose_input( num_frames: int = 25, delta_elevation_start: float = 0.0, delta_elevation_end: float = 0.0, delta_azimuth_start: float = -45.0, delta_azimuth_end: float = 45.0, delta_radius_start: float = 0.0, delta_radius_end: float = 0.0, ): r""" The function to prepare the input to the vivid123 pipeline Args: delta_elevation_start (`float`, *optional*, defaults to 0.0): The starting relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_elevation_end (`float`, *optional*, defaults to 0.0): The ending relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_start (`float`, *optional*, defaults to -45.0): The starting relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_end (`float`, *optional*, defaults to 45.0): The ending relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. Returns: """ cam_elevation = np.radians(np.linspace(delta_elevation_start, delta_elevation_end, num_frames))[..., None] cam_azimuth = np.radians(np.linspace(delta_azimuth_start, delta_azimuth_end, num_frames)) cam_azimuth_sin_cos = np.stack([np.sin(cam_azimuth), np.cos(cam_azimuth)], axis=-1) cam_radius = np.linspace(delta_radius_start, delta_radius_end, num_frames)[..., None] cam_pose_np = np.concatenate([cam_elevation, cam_azimuth_sin_cos, cam_radius], axis=-1) cam_pose_torch = torch.from_numpy(cam_pose_np) return cam_pose_torch # refer to https://stackoverflow.com/a/33507138/6257375 def conver_rgba_to_rgb_white_bg( image: Image, H: int = 256, W: int = 256, ): input_image = image.convert("RGBA").resize((H, W), Image.BICUBIC) background = Image.new("RGBA", input_image.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, input_image) return alpha_composite def prepare_fusion_schedule_linear( num_inference_steps: int = 50, video_linear_start_weight: float = 1.0, video_linear_end_weight: float = 0.5, video_start_step_percentage: float = 0.0, video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. 
The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. """ assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = 
torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet") zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection") zero123_img_enc = CLIPVisionModelWithProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="image_encoder") vivid123_pipe = ViVid123Pipeline.from_pretrained( VIDEO_MODEL_ID, # torch_dtype=torch.float16, novel_view_unet=zero123_unet, image_encoder=zero123_img_enc, cc_projection=zero123_cam_proj, ) vivid123_pipe.scheduler = DPMSolverMultistepScheduler.from_config(vivid123_pipe.scheduler.config) # vivid123_pipe.to("cuda") vivid123_pipe.enable_model_cpu_offload() xl_pipe = DiffusionPipeline.from_pretrained(VIDEO_XL_MODEL_ID, torch_dtype=torch.float16) xl_pipe.scheduler = DPMSolverMultistepScheduler.from_config(xl_pipe.scheduler.config) # xl_pipe.to("cuda") xl_pipe.enable_model_cpu_offload() return vivid123_pipe, xl_pipe def generation_vivid123( vivid123_pipe: ViVid123Pipeline, xl_pipe: DiffusionPipeline, config_path: str, output_root_dir: str, ): # loading yaml config _var_matcher = re.compile(r"\${([^}^{]+)}") _tag_matcher = re.compile(r"[^$]*\${([^}^{]+)}.*") def _path_constructor(_loader: Any, node: Any): def replace_fn(match): envparts = f"{match.group(1)}:".split(":") return os.environ.get(envparts[0], envparts[1]) return _var_matcher.sub(replace_fn, node.value) def load_yaml(filename: str) -> dict: yaml.add_implicit_resolver("!envvar", _tag_matcher, None, yaml.SafeLoader) yaml.add_constructor("!envvar", _path_constructor, yaml.SafeLoader) try: with open(filename, "r") as f: return yaml.safe_load(f.read()) except (FileNotFoundError, PermissionError, ParserError): return dict() yaml_loaded = load_yaml(config_path)
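Aside: a standalone numpy sketch (mirroring prepare_cam_pose_input above) of the per-frame camera-pose encoding, one row of (elevation, sin(azimuth), cos(azimuth), radius) per frame, with angles converted from degrees to radians:

import numpy as np

num_frames = 5
elevation = np.radians(np.linspace(0.0, 0.0, num_frames))[..., None]
azimuth = np.radians(np.linspace(-45.0, 45.0, num_frames))
azimuth_sin_cos = np.stack([np.sin(azimuth), np.cos(azimuth)], axis=-1)
radius = np.linspace(0.0, 0.0, num_frames)[..., None]
cam_pose = np.concatenate([elevation, azimuth_sin_cos, radius], axis=-1)
print(cam_pose.shape)  # (5, 4)
print(cam_pose[0])     # frame 0 at azimuth -45 deg: [0., -0.7071, 0.7071, 0.]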
cfg = ViVid123BaseSchema.model_validate(yaml_loaded)
2
2023-11-27 22:48:17+00:00
16k
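Aside: the YAML loader in the example above resolves ${VAR:default} placeholders from the environment before parsing the config. A self-contained sketch of just that substitution step (loosely mirroring the _var_matcher/_path_constructor pair; the sample paths are made up):

import os
import re

_var_matcher = re.compile(r"\$\{([^}^{]+)\}")

def resolve_env(value: str) -> str:
    def replace_fn(match):
        # "NAME:default" -> look up NAME, fall back to default (empty if absent)
        name, _, default = f"{match.group(1)}:".partition(":")
        return os.environ.get(name, default.rstrip(":"))
    return _var_matcher.sub(replace_fn, value)

os.environ["RUN_NAME"] = "demo"
print(resolve_env("outputs/${RUN_NAME}/${MISSING:fallback}.mp4"))
# -> outputs/demo/fallback.mp4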
TISUnion/PrimeBackup
prime_backup/action/import_backup_action.py
[ { "identifier": "CreateBackupActionBase", "path": "prime_backup/action/create_backup_action_base.py", "snippet": "class CreateBackupActionBase(Action[BackupInfo], ABC):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.__new_blobs: List[BlobInfo] = []\n\t\tself.__new_blobs_summary: Optional[BlobListSummary] = None\n\t\tself.__blobs_rollbackers: List[Callable] = []\n\n\tdef _remove_file(self, file_to_remove: Path):\n\t\ttry:\n\t\t\tfile_to_remove.unlink(missing_ok=True)\n\t\texcept OSError as e:\n\t\t\tself.logger.error('(rollback) remove file {!r} failed: {}'.format(file_to_remove, e))\n\n\tdef _add_remove_file_rollbacker(self, file_to_remove: Path):\n\t\tself.__blobs_rollbackers.append(functools.partial(self._remove_file, file_to_remove=file_to_remove))\n\n\tdef _apply_blob_rollback(self):\n\t\tif len(self.__blobs_rollbackers) > 0:\n\t\t\tself.__blobs_rollbackers.clear()\n\t\t\tself.logger.warning('Error occurs during import, applying rollback')\n\t\t\tfor rollback_func in self.__blobs_rollbackers:\n\t\t\t\trollback_func()\n\n\tdef _create_blob(self, session: DbSession, **kwargs) -> schema.Blob:\n\t\tblob = session.create_blob(**kwargs)\n\t\tself.__new_blobs.append(BlobInfo.of(blob))\n\t\treturn blob\n\n\tdef get_new_blobs_summary(self) -> BlobListSummary:\n\t\tif self.__new_blobs_summary is None:\n\t\t\tself.__new_blobs_summary = BlobListSummary.of(self.__new_blobs)\n\t\treturn self.__new_blobs_summary\n\n\t@classmethod\n\tdef _finalize_backup_and_files(cls, session: DbSession, backup: schema.Backup, files: List[schema.File]):\n\t\t# flush to generate the backup id\n\t\tsession.flush()\n\n\t\tfile_raw_size_sum = 0\n\t\tfile_stored_size_sum = 0\n\n\t\tfor file in files:\n\t\t\tfile.backup_id = backup.id\n\t\t\tif file.blob_raw_size is not None:\n\t\t\t\tfile_raw_size_sum += file.blob_raw_size\n\t\t\tif file.blob_stored_size is not None:\n\t\t\t\tfile_stored_size_sum += file.blob_stored_size\n\t\t\tsession.add(file)\n\n\t\tbackup.file_raw_size_sum = file_raw_size_sum\n\t\tbackup.file_stored_size_sum = file_stored_size_sum\n\n\tdef run(self) -> None:\n\t\tself.__new_blobs.clear()\n\t\tself.__new_blobs_summary = None\n\t\tself.__blobs_rollbackers.clear()" }, { "identifier": "Compressor", "path": "prime_backup/compressors.py", "snippet": "class Compressor(ABC):\n\tclass CopyCompressResult(NamedTuple):\n\t\tread_size: int\n\t\tread_hash: str\n\t\twrite_size: int\n\n\t@classmethod\n\tdef create(cls, method: Union[str, 'CompressMethod']) -> 'Compressor':\n\t\tif not isinstance(method, CompressMethod):\n\t\t\tif method in CompressMethod.__members__:\n\t\t\t\tmethod = CompressMethod[method]\n\t\t\telse:\n\t\t\t\traise ValueError(f'Unknown compression method: {method}')\n\t\treturn method.value()\n\n\t@classmethod\n\tdef get_method(cls) -> 'CompressMethod':\n\t\treturn CompressMethod(cls)\n\n\t@classmethod\n\tdef get_name(cls) -> str:\n\t\treturn cls.get_method().name\n\n\t@classmethod\n\t@abstractmethod\n\tdef ensure_lib(cls):\n\t\t...\n\n\tdef copy_compressed(self, source_path: PathLike, dest_path: PathLike, *, calc_hash: bool = False) -> CopyCompressResult:\n\t\t\"\"\"\n\t\tsource --[compress]--> destination\n\t\t\"\"\"\n\t\twith open(source_path, 'rb') as f_in, open(dest_path, 'wb') as f_out:\n\t\t\treader = BypassReader(f_in, calc_hash=calc_hash)\n\t\t\twriter = BypassWriter(f_out)\n\t\t\tself._copy_compressed(reader, writer)\n\t\t\treturn self.CopyCompressResult(reader.get_read_len(), reader.get_hash(), writer.get_write_len())\n\n\tdef copy_decompressed(self, source_path: PathLike, 
dest_path: PathLike):\n\t\t\"\"\"\n\t\tsource --[decompress]--> destination\n\t\t\"\"\"\n\t\twith open(source_path, 'rb') as f_in, open(dest_path, 'wb') as f_out:\n\t\t\tself._copy_decompressed(f_in, f_out)\n\n\[email protected]\n\tdef open_compressed(self, target_path: PathLike) -> ContextManager[BinaryIO]:\n\t\t\"\"\"\n\t\t(writer) --[compress]--> target_path\n\t\t\"\"\"\n\t\twith open(target_path, 'wb') as f:\n\t\t\twith self.compress_stream(f) as f_compressed:\n\t\t\t\tyield f_compressed\n\n\[email protected]\n\tdef open_compressed_bypassed(self, target_path: PathLike) -> ContextManager[Tuple[BypassWriter, BinaryIO]]:\n\t\t\"\"\"\n\t\t(writer) --[compress]--> target_path\n\t\t ^- bypassed\n\t\t\"\"\"\n\t\twith open(target_path, 'wb') as f:\n\t\t\twriter = BypassWriter(f)\n\t\t\twith self.compress_stream(writer) as f_compressed:\n\t\t\t\tyield writer, f_compressed\n\n\[email protected]\n\tdef open_decompressed(self, source_path: PathLike) -> ContextManager[BinaryIO]:\n\t\t\"\"\"\n\t\tsource_path --[decompress]--> (reader)\n\t\t\"\"\"\n\t\twith open(source_path, 'rb') as f:\n\t\t\twith self.decompress_stream(f) as f_decompressed:\n\t\t\t\tyield f_decompressed\n\n\[email protected]\n\tdef open_decompressed_bypassed(self, source_path: PathLike) -> ContextManager[Tuple[BypassReader, BinaryIO]]:\n\t\t\"\"\"\n\t\tsource_path --[decompress]--> (reader)\n\t\t ^- bypassed\n\t\t\"\"\"\n\t\twith open(source_path, 'rb') as f:\n\t\t\treader = BypassReader(f, calc_hash=False) # it's meaningless to calc hash on the compressed file\n\t\t\twith self.decompress_stream(reader) as f_decompressed:\n\t\t\t\tyield reader, f_decompressed\n\n\tdef _copy_compressed(self, f_in: BinaryIO, f_out: BinaryIO):\n\t\t\"\"\"\n\t\t(f_in) --[compress]--> (f_out)\n\t\t\"\"\"\n\t\twith self.compress_stream(f_out) as compressed_out:\n\t\t\tshutil.copyfileobj(f_in, compressed_out)\n\n\tdef _copy_decompressed(self, f_in: BinaryIO, f_out: BinaryIO):\n\t\t\"\"\"\n\t\t(f_in) --[decompress]--> (f_out)\n\t\t\"\"\"\n\t\twith self.decompress_stream(f_in) as compressed_in:\n\t\t\tshutil.copyfileobj(compressed_in, f_out)\n\n\t@abstractmethod\n\tdef compress_stream(self, f_out: BinaryIO) -> ContextManager[BinaryIO]:\n\t\t\"\"\"\n\t\tOpen a stream from compressing write\n\t\t\"\"\"\n\t\t...\n\n\t@abstractmethod\n\tdef decompress_stream(self, f_in: BinaryIO) -> ContextManager[BinaryIO]:\n\t\t\"\"\"\n\t\tOpen a stream from decompressing read\n\t\t\"\"\"\n\t\t..." 
}, { "identifier": "CompressMethod", "path": "prime_backup/compressors.py", "snippet": "class CompressMethod(enum.Enum):\n\tplain = PlainCompressor\n\tgzip = GzipCompressor\n\tlzma = LzmaCompressor\n\tzstd = ZstdCompressor\n\tlz4 = Lz4Compressor" }, { "identifier": "Config", "path": "prime_backup/config/config.py", "snippet": "class Config(Serializable):\n\tenabled: bool = False\n\tdebug: bool = False\n\tstorage_root: str = './pb_files'\n\tconcurrency: int = 1\n\n\tcommand: CommandConfig = CommandConfig()\n\tserver: ServerConfig = ServerConfig()\n\tbackup: BackupConfig = BackupConfig()\n\tscheduled_backup: ScheduledBackupConfig = ScheduledBackupConfig()\n\tprune: PruneConfig = PruneConfig()\n\tdatabase: DatabaseConfig = DatabaseConfig()\n\n\t@classmethod\n\[email protected]_cache\n\tdef __get_default(cls) -> 'Config':\n\t\treturn Config.get_default()\n\n\t@classmethod\n\tdef get(cls) -> 'Config':\n\t\tif _config is None:\n\t\t\treturn cls.__get_default()\n\t\treturn _config\n\n\tdef get_effective_concurrency(self) -> int:\n\t\tif self.concurrency == 0:\n\t\t\timport multiprocessing\n\t\t\treturn max(1, int(multiprocessing.cpu_count() * 0.5))\n\t\telse:\n\t\t\treturn max(1, self.concurrency)\n\n\t@property\n\tdef storage_path(self) -> Path:\n\t\treturn Path(self.storage_root)\n\n\t@property\n\tdef blobs_path(self) -> Path:\n\t\treturn self.storage_path / 'blobs'\n\n\t@property\n\tdef temp_path(self) -> Path:\n\t\treturn self.storage_path / 'temp'\n\n\t@property\n\tdef source_path(self) -> Path:\n\t\treturn Path(self.backup.source_root)" }, { "identifier": "BACKUP_META_FILE_NAME", "path": "prime_backup/constants.py", "snippet": "BACKUP_META_FILE_NAME = '.prime_backup.meta.json'" }, { "identifier": "schema", "path": "prime_backup/db/schema.py", "snippet": "class Base(DeclarativeBase):\nclass DbMeta(Base):\nclass Blob(Base):\nclass File(Base):\nclass Backup(Base):\n\tdef __repr__(self) -> str:" }, { "identifier": "DbAccess", "path": "prime_backup/db/access.py", "snippet": "class DbAccess:\n\t__engine: Optional[Engine] = None\n\t__db_file_path: Optional[Path] = None\n\n\t__hash_method: Optional[HashMethod] = None\n\n\t@classmethod\n\tdef init(cls, auto_migrate: bool = True):\n\t\t\"\"\"\n\t\t:param auto_migrate:\n\t\t\tTrue: check db meta, try to migrate;\n\t\t\tFalse: check db version only\n\t\t\"\"\"\n\t\tdb_dir = Config.get().storage_path\n\t\tdb_dir.mkdir(parents=True, exist_ok=True)\n\n\t\tdb_path = db_dir / db_constants.DB_FILE_NAME\n\t\tcls.__engine = create_engine('sqlite:///' + str(db_path))\n\t\tcls.__db_file_path = db_path\n\n\t\tmigration = DbMigration(cls.__engine)\n\t\tif auto_migrate:\n\t\t\tmigration.check_and_migrate()\n\t\telse:\n\t\t\tmigration.ensure_version()\n\n\t\tcls.sync_hash_method()\n\n\t@classmethod\n\tdef shutdown(cls):\n\t\tif (engine := cls.__engine) is not None:\n\t\t\tengine.dispose()\n\t\t\tcls.__engine = None\n\n\t@classmethod\n\tdef sync_hash_method(cls):\n\t\twith cls.open_session() as session:\n\t\t\thash_method_str = str(session.get_db_meta().hash_method)\n\t\ttry:\n\t\t\tcls.__hash_method = HashMethod[hash_method_str]\n\t\texcept KeyError:\n\t\t\traise ValueError('invalid hash method {!r} in db meta'.format(hash_method_str)) from None\n\n\t@classmethod\n\tdef __ensure_engine(cls) -> Engine:\n\t\tif cls.__engine is None:\n\t\t\traise RuntimeError('engine unavailable')\n\t\treturn cls.__engine\n\n\t@classmethod\n\tdef __ensure_not_none(cls, value):\n\t\tif value is None:\n\t\t\traise RuntimeError('db not is not initialized yet')\n\t\treturn 
value\n\n\t@classmethod\n\tdef get_db_file_path(cls) -> Path:\n\t\treturn cls.__ensure_not_none(cls.__db_file_path)\n\n\t@classmethod\n\tdef get_hash_method(cls) -> HashMethod:\n\t\treturn cls.__ensure_not_none(cls.__hash_method)\n\n\t@classmethod\n\[email protected]\n\tdef open_session(cls) -> ContextManager['DbSession']:\n\t\twith Session(cls.__ensure_engine()) as session, session.begin():\n\t\t\tyield DbSession(session, cls.__db_file_path)\n\n\t@classmethod\n\[email protected]\n\tdef enable_echo(cls) -> ContextManager[None]:\n\t\tengine = cls.__ensure_engine()\n\t\tengine.echo = True\n\t\ttry:\n\t\t\tyield\n\t\tfinally:\n\t\t\tengine.echo = False" }, { "identifier": "DbSession", "path": "prime_backup/db/session.py", "snippet": "class DbSession:\n\tdef __init__(self, session: Session, db_path: Path = None):\n\t\tself.session = session\n\t\tself.db_path = db_path\n\n\t\t# the limit in old sqlite (https://www.sqlite.org/limits.html#max_variable_number)\n\t\tself.__safe_var_limit = 999 - 20\n\n\t@classmethod\n\tdef __check_support(cls, check_func: Callable[[], bool], msg: str):\n\t\tif not (is_supported := check_func()):\n\t\t\tfrom prime_backup import logger\n\t\t\timport sqlite3\n\t\t\tlogger.get().warning(f'WARN: {msg}. SQLite version: {sqlite3.sqlite_version}')\n\t\treturn is_supported\n\n\t@classmethod\n\[email protected]_cache\n\tdef __supports_json_query(cls) -> bool:\n\t\treturn cls.__check_support(db_utils.check_sqlite_json_query_support, 'SQLite backend does not support json query. Inefficient manual query is used as the fallback')\n\n\t@classmethod\n\[email protected]_cache\n\tdef __supports_vacuum_into(cls) -> bool:\n\t\treturn cls.__check_support(db_utils.check_sqlite_vacuum_into_support, 'SQLite backend does not support VACUUM INTO statement. 
Insecure manual file copy is used as the fallback')\n\n\t# ========================= General Database Operations =========================\n\n\tdef add(self, obj: schema.Base):\n\t\tself.session.add(obj)\n\n\tdef expunge(self, obj: schema.Base):\n\t\tself.session.expunge(obj)\n\n\tdef expunge_all(self):\n\t\tself.session.expunge_all()\n\n\tdef flush(self):\n\t\tself.session.flush()\n\n\tdef flush_and_expunge_all(self):\n\t\tself.flush()\n\t\tself.expunge_all()\n\n\tdef commit(self):\n\t\tself.session.commit()\n\n\[email protected]\n\tdef no_auto_flush(self) -> ContextManager[None]:\n\t\twith self.session.no_autoflush:\n\t\t\tyield\n\n\tdef vacuum(self, into_file: Optional[str] = None, allow_vacuum_into_fallback: bool = True):\n\t\t# https://www.sqlite.org/lang_vacuum.html\n\t\tif into_file is not None:\n\t\t\tif self.__supports_vacuum_into():\n\t\t\t\tself.session.execute(text('VACUUM INTO :into_file').bindparams(into_file=str(into_file)))\n\t\t\telif allow_vacuum_into_fallback:\n\t\t\t\tself.session.execute(text('VACUUM'))\n\t\t\t\tself.session.commit()\n\t\t\t\tif self.db_path is None:\n\t\t\t\t\traise RuntimeError('db_path undefined')\n\t\t\t\tshutil.copyfile(self.db_path, into_file)\n\t\t\telse:\n\t\t\t\traise UnsupportedDatabaseOperation('current sqlite version {} does not support \"VACUUM INTO\" statement'.format(sqlite3.sqlite_version))\n\t\telse:\n\t\t\tself.session.execute(text('VACUUM'))\n\n\t# ==================================== DbMeta ====================================\n\n\tdef get_db_meta(self) -> schema.DbMeta:\n\t\tmeta: Optional[schema.DbMeta] = self.session.get(schema.DbMeta, db_constants.DB_MAGIC_INDEX)\n\t\tif meta is None:\n\t\t\traise ValueError('None db meta')\n\t\treturn meta\n\n\t# ===================================== Blob =====================================\n\n\tdef create_blob(self, **kwargs) -> schema.Blob:\n\t\tblob = schema.Blob(**kwargs)\n\t\tself.session.add(blob)\n\t\treturn blob\n\n\tdef get_blob_count(self) -> int:\n\t\treturn _int_or_0(self.session.execute(select(func.count()).select_from(schema.Blob)).scalar_one())\n\n\tdef get_blob_opt(self, h: str) -> Optional[schema.Blob]:\n\t\treturn self.session.get(schema.Blob, h)\n\n\tdef get_blob(self, h: str) -> schema.Blob:\n\t\tblob = self.get_blob_opt(h)\n\t\tif blob is None:\n\t\t\traise BlobNotFound(h)\n\t\treturn blob\n\n\tdef get_blobs(self, hashes: List[str]) -> Dict[str, Optional[schema.Blob]]:\n\t\t\"\"\"\n\t\t:return: a dict, hash -> optional blob. 
All given hashes are in the dict\n\t\t\"\"\"\n\t\tresult: Dict[str, Optional[schema.Blob]] = {h: None for h in hashes}\n\t\tfor view in collection_utils.slicing_iterate(hashes, self.__safe_var_limit):\n\t\t\tfor blob in self.session.execute(select(schema.Blob).where(schema.Blob.hash.in_(view))).scalars().all():\n\t\t\t\tresult[blob.hash] = blob\n\t\treturn result\n\n\tdef list_blobs(self, limit: Optional[int] = None, offset: Optional[int] = None) -> List[schema.Blob]:\n\t\ts = select(schema.Blob)\n\t\tif limit is not None:\n\t\t\ts = s.limit(limit)\n\t\tif offset is not None:\n\t\t\ts = s.offset(offset)\n\t\treturn _list_it(self.session.execute(s).scalars().all())\n\n\tdef list_blob_with_hash_prefix(self, hash_prefix: str, limit: int) -> List[schema.Blob]:\n\t\ts = select(schema.Blob).where(schema.Blob.hash.startswith(hash_prefix, autoescape=True)).limit(limit)\n\t\treturn _list_it(self.session.execute(s).scalars().all())\n\n\tdef iterate_blob_batch(self, *, batch_size: int = 5000) -> Iterator[List[schema.Blob]]:\n\t\tlimit, offset = batch_size, 0\n\t\twhile True:\n\t\t\tblobs = self.list_blobs(limit=limit, offset=offset)\n\t\t\tif len(blobs) == 0:\n\t\t\t\tbreak\n\t\t\tyield blobs\n\t\t\toffset += limit\n\n\tdef get_all_blob_hashes(self) -> List[str]:\n\t\t# TODO: don't load all blob into memory?\n\t\treturn _list_it(self.session.execute(select(schema.Blob.hash)).scalars().all())\n\n\tdef has_blob_with_size(self, raw_size: int) -> bool:\n\t\tq = self.session.query(schema.Blob).filter_by(raw_size=raw_size).exists()\n\t\treturn self.session.query(q).scalar()\n\n\tdef has_blob_with_size_batched(self, sizes: List[int]) -> Dict[int, bool]:\n\t\tresult = {s: False for s in sizes}\n\t\tfor view in collection_utils.slicing_iterate(sizes, self.__safe_var_limit):\n\t\t\tfor size in self.session.execute(select(schema.Blob.raw_size).where(schema.Blob.raw_size.in_(view)).distinct()).scalars().all():\n\t\t\t\tresult[size] = True\n\t\treturn result\n\n\tdef get_blob_stored_size_sum(self) -> int:\n\t\treturn _int_or_0(self.session.execute(func.sum(schema.Blob.stored_size).select()).scalar_one())\n\n\tdef get_blob_raw_size_sum(self) -> int:\n\t\treturn _int_or_0(self.session.execute(func.sum(schema.Blob.raw_size).select()).scalar_one())\n\n\tdef delete_blob(self, blob: schema.Blob):\n\t\tself.session.delete(blob)\n\n\tdef delete_blobs(self, hashes: List[str]):\n\t\tfor view in collection_utils.slicing_iterate(hashes, self.__safe_var_limit):\n\t\t\tself.session.execute(delete(schema.Blob).where(schema.Blob.hash.in_(view)))\n\n\tdef filtered_orphan_blob_hashes(self, hashes: List[str]) -> List[str]:\n\t\tgood_hashes = set()\n\t\tfor view in collection_utils.slicing_iterate(hashes, self.__safe_var_limit):\n\t\t\tgood_hashes.update(\n\t\t\t\tself.session.execute(\n\t\t\t\t\tselect(schema.File.blob_hash).where(schema.File.blob_hash.in_(view)).distinct()\n\t\t\t\t).scalars().all()\n\t\t\t)\n\t\treturn list(filter(lambda h: h not in good_hashes, hashes))\n\n\t# ===================================== File =====================================\n\n\tdef create_file(self, *, add_to_session: bool = True, blob: Optional[schema.Blob] = None, **kwargs) -> schema.File:\n\t\tif blob is not None:\n\t\t\tkwargs.update(\n\t\t\t\tblob_hash=blob.hash,\n\t\t\t\tblob_compress=blob.compress,\n\t\t\t\tblob_raw_size=blob.raw_size,\n\t\t\t\tblob_stored_size=blob.stored_size,\n\t\t\t)\n\t\tfile = schema.File(**kwargs)\n\t\tif add_to_session:\n\t\t\tself.session.add(file)\n\t\treturn file\n\n\tdef get_file_count(self) -> 
int:\n\t\treturn _int_or_0(self.session.execute(select(func.count()).select_from(schema.File)).scalar_one())\n\n\tdef get_file_opt(self, backup_id: int, path: str) -> Optional[schema.File]:\n\t\treturn self.session.get(schema.File, dict(backup_id=backup_id, path=path))\n\n\tdef get_file(self, backup_id: int, path: str) -> schema.File:\n\t\tfile = self.get_file_opt(backup_id, path)\n\t\tif file is None:\n\t\t\traise BackupFileNotFound(backup_id, path)\n\t\treturn file\n\n\tdef get_file_raw_size_sum(self) -> int:\n\t\treturn _int_or_0(self.session.execute(func.sum(schema.File.blob_raw_size).select()).scalar_one())\n\n\tdef get_file_by_blob_hashes(self, hashes: List[str]) -> List[schema.File]:\n\t\thashes = collection_utils.deduplicated_list(hashes)\n\t\tresult = []\n\t\tfor view in collection_utils.slicing_iterate(hashes, self.__safe_var_limit):\n\t\t\tresult.extend(self.session.execute(\n\t\t\t\tselect(schema.File).where(schema.File.blob_hash.in_(view))\n\t\t\t).scalars().all())\n\t\treturn result\n\n\tdef get_file_count_by_blob_hashes(self, hashes: List[str]) -> int:\n\t\tcnt = 0\n\t\tfor view in collection_utils.slicing_iterate(hashes, self.__safe_var_limit):\n\t\t\tcnt += _int_or_0(self.session.execute(\n\t\t\t\tselect(func.count()).\n\t\t\t\tselect_from(schema.File).\n\t\t\t\twhere(schema.File.blob_hash.in_(view))\n\t\t\t).scalar_one())\n\t\treturn cnt\n\n\tdef list_files(self, limit: Optional[int] = None, offset: Optional[int] = None) -> List[schema.File]:\n\t\ts = select(schema.File)\n\t\tif limit is not None:\n\t\t\ts = s.limit(limit)\n\t\tif offset is not None:\n\t\t\ts = s.offset(offset)\n\t\treturn _list_it(self.session.execute(s).scalars().all())\n\n\tdef iterate_file_batch(self, *, batch_size: int = 5000) -> Iterator[List[schema.File]]:\n\t\tlimit, offset = batch_size, 0\n\t\twhile True:\n\t\t\tfiles = self.list_files(limit=limit, offset=offset)\n\t\t\tif len(files) == 0:\n\t\t\t\tbreak\n\t\t\tyield files\n\t\t\toffset += limit\n\n\tdef delete_file(self, file: schema.File):\n\t\tself.session.delete(file)\n\n\tdef has_file_with_hash(self, h: str):\n\t\tq = self.session.query(schema.File).filter_by(blob_hash=h).exists()\n\t\texists = self.session.query(q).scalar()\n\t\treturn exists\n\n\tdef calc_file_stored_size_sum(self, backup_id: int) -> int:\n\t\treturn _int_or_0(self.session.execute(\n\t\t\tselect(func.sum(schema.File.blob_stored_size)).\n\t\t\twhere(schema.File.backup_id == backup_id)\n\t\t).scalar_one())\n\n\t# ==================================== Backup ====================================\n\n\t@classmethod\n\tdef __needs_manual_backup_tag_filter(cls, backup_filter: Optional[BackupFilter]) -> bool:\n\t\t\"\"\"\n\t\tSQLite does not support json query, and the backup filter contains tag filter\n\t\t\"\"\"\n\t\treturn not cls.__supports_json_query() and backup_filter is not None and len(backup_filter.tag_filters) > 0\n\n\t@classmethod\n\tdef __manual_backup_tag_filter(cls, backup: schema.Backup, backup_filter: BackupFilter) -> bool:\n\t\ttags: schema.BackupTagDict = backup.tags\n\t\tfor tf in backup_filter.tag_filters:\n\t\t\tdef check_one() -> bool:\n\t\t\t\tif tf.policy == BackupTagFilter.Policy.exists:\n\t\t\t\t\treturn tf.name.name in tags\n\t\t\t\telif tf.policy == BackupTagFilter.Policy.not_exists:\n\t\t\t\t\treturn tf.name.name not in tags\n\t\t\t\telif tf.policy == BackupTagFilter.Policy.equals:\n\t\t\t\t\treturn tf.name.name in tags and tags[tf.name.name] == tf.value\n\t\t\t\telif tf.policy == BackupTagFilter.Policy.not_equals:\n\t\t\t\t\treturn tf.name.name not in 
tags or tags[tf.name.name] != tf.value\n\t\t\t\telif tf.policy == BackupTagFilter.Policy.exists_and_not_equals:\n\t\t\t\t\treturn tf.name.name in tags and tags[tf.name.name] != tf.value\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(tf.policy)\n\t\t\tif not check_one():\n\t\t\t\treturn False\n\t\treturn True\n\n\t@classmethod\n\tdef __sql_backup_tag_filter(cls, s: Select[T], backup_filter: BackupFilter) -> Select[T]:\n\t\tfor tf in backup_filter.tag_filters:\n\t\t\telement = schema.Backup.tags[tf.name.name]\n\t\t\tif tf.policy == BackupTagFilter.Policy.exists:\n\t\t\t\ts = s.filter(element != JSON.NULL)\n\t\t\telif tf.policy == BackupTagFilter.Policy.not_exists:\n\t\t\t\ts = s.filter(element == JSON.NULL)\n\t\t\telif tf.policy in [BackupTagFilter.Policy.equals, BackupTagFilter.Policy.not_equals, BackupTagFilter.Policy.exists_and_not_equals]:\n\t\t\t\tvalue_type = tf.name.value.type\n\t\t\t\tif value_type == bool:\n\t\t\t\t\tjs_value, value = element.as_boolean(), bool(tf.value)\n\t\t\t\telif value_type == str:\n\t\t\t\t\tjs_value, value = element.as_string(), str(tf.value)\n\t\t\t\telif value_type == float:\n\t\t\t\t\tjs_value, value = element.as_float(), float(tf.value)\n\t\t\t\telif value_type == int:\n\t\t\t\t\tjs_value, value = element.as_integer(), int(tf.value)\n\t\t\t\telse:\n\t\t\t\t\traise TypeError(value_type)\n\n\t\t\t\tif tf.policy == BackupTagFilter.Policy.equals:\n\t\t\t\t\tfilter_ = js_value == value\n\t\t\t\telif tf.policy == BackupTagFilter.Policy.not_equals:\n\t\t\t\t\tfilter_ = (js_value != value) | (element == JSON.NULL)\n\t\t\t\telif tf.policy == BackupTagFilter.Policy.exists_and_not_equals:\n\t\t\t\t\tfilter_ = js_value != value\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(tf.policy)\n\n\t\t\t\ts = s.filter(filter_)\n\t\t\telse:\n\t\t\t\traise ValueError(tf.policy)\n\t\treturn s\n\n\t@classmethod\n\tdef __apply_backup_filter(cls, s: Select[T], backup_filter: BackupFilter) -> Select[T]:\n\t\tif backup_filter.id_start is not None:\n\t\t\ts = s.where(schema.Backup.id >= backup_filter.id_start)\n\t\tif backup_filter.id_end is not None:\n\t\t\ts = s.where(schema.Backup.id <= backup_filter.id_end)\n\t\tif backup_filter.creator is not None:\n\t\t\ts = s.filter_by(creator=str(backup_filter.creator))\n\t\tif backup_filter.timestamp_start is not None:\n\t\t\ts = s.where(schema.Backup.timestamp >= backup_filter.timestamp_start)\n\t\tif backup_filter.timestamp_end is not None:\n\t\t\ts = s.where(schema.Backup.timestamp <= backup_filter.timestamp_end)\n\t\tif cls.__supports_json_query():\n\t\t\ts = cls.__sql_backup_tag_filter(s, backup_filter)\n\t\treturn s\n\n\tdef create_backup(self, **kwargs) -> schema.Backup:\n\t\t\"\"\"\n\t\tNotes: the backup id is not generated yet. 
Invoke :meth:`flush` to generate the backup id\n\t\t\"\"\"\n\t\tif 'timestamp' not in kwargs:\n\t\t\tkwargs['timestamp'] = time.time_ns()\n\t\tbackup = schema.Backup(**kwargs)\n\t\tself.session.add(backup)\n\t\treturn backup\n\n\tdef get_backup_count(self, backup_filter: Optional[BackupFilter] = None) -> int:\n\t\tif self.__needs_manual_backup_tag_filter(backup_filter):\n\t\t\ts = self.__apply_backup_filter(select(schema.Backup), backup_filter)\n\t\t\tbackups = [backup for backup in self.session.execute(s).scalars().all() if self.__manual_backup_tag_filter(backup, backup_filter)]\n\t\t\treturn len(backups)\n\t\telse:\n\t\t\ts = select(func.count()).select_from(schema.Backup)\n\t\t\tif backup_filter is not None:\n\t\t\t\ts = self.__apply_backup_filter(s, backup_filter)\n\t\t\treturn _int_or_0(self.session.execute(s).scalar_one())\n\n\tdef get_backup_opt(self, backup_id: int) -> Optional[schema.Backup]:\n\t\treturn self.session.get(schema.Backup, backup_id)\n\n\tdef get_backup(self, backup_id: int) -> schema.Backup:\n\t\tbackup = self.get_backup_opt(backup_id)\n\t\tif backup is None:\n\t\t\traise BackupNotFound(backup_id)\n\t\treturn backup\n\n\tdef get_backups(self, backup_ids: List[int]) -> Dict[int, schema.Backup]:\n\t\t\"\"\"\n\t\t:return: a dict, backup id -> optional Backup. All given ids are in the dict\n\t\t\"\"\"\n\t\tresult: Dict[int, Optional[schema.Backup]] = {bid: None for bid in backup_ids}\n\t\tfor view in collection_utils.slicing_iterate(backup_ids, self.__safe_var_limit):\n\t\t\tfor backup in self.session.execute(select(schema.Backup).where(schema.Backup.id.in_(view))).scalars().all():\n\t\t\t\tresult[backup.id] = backup\n\t\treturn result\n\n\tdef get_backup_ids_by_blob_hashes(self, hashes: List[str]) -> List[int]:\n\t\tbackup_ids = set()\n\t\tfor view in collection_utils.slicing_iterate(hashes, self.__safe_var_limit):\n\t\t\tbackup_ids.update(\n\t\t\t\tself.session.execute(\n\t\t\t\t\tselect(schema.File.backup_id).\n\t\t\t\t\twhere(schema.File.blob_hash.in_(view)).\n\t\t\t\t\tdistinct()\n\t\t\t\t).scalars().all()\n\t\t\t)\n\t\treturn list(sorted(backup_ids))\n\n\tdef list_backup(self, backup_filter: Optional[BackupFilter] = None, limit: Optional[int] = None, offset: Optional[int] = None) -> List[schema.Backup]:\n\t\ts = select(schema.Backup)\n\t\tif backup_filter is not None:\n\t\t\ts = self.__apply_backup_filter(s, backup_filter)\n\t\ts = s.order_by(desc(schema.Backup.timestamp), desc(schema.Backup.id))\n\n\t\tif self.__needs_manual_backup_tag_filter(backup_filter):\n\t\t\tbackups = [backup for backup in self.session.execute(s).scalars().all() if self.__manual_backup_tag_filter(backup, backup_filter)]\n\t\t\tif offset is not None:\n\t\t\t\tbackups = backups[offset:]\n\t\t\tif limit is not None:\n\t\t\t\tbackups = backups[:limit]\n\t\t\treturn backups\n\t\telse:\n\t\t\tif offset is not None:\n\t\t\t\ts = s.offset(offset)\n\t\t\tif limit is not None:\n\t\t\t\ts = s.limit(limit)\n\t\t\treturn _list_it(self.session.execute(s).scalars().all())\n\n\tdef iterate_backup_batch(self, *, batch_size: int = 5000) -> Iterator[List[schema.Backup]]:\n\t\tlimit, offset = batch_size, 0\n\t\twhile True:\n\t\t\tbackups = self.list_backup(limit=limit, offset=offset)\n\t\t\tif len(backups) == 0:\n\t\t\t\tbreak\n\t\t\tyield backups\n\t\t\toffset += limit\n\n\tdef delete_backup(self, backup: schema.Backup):\n\t\tself.session.delete(backup)" }, { "identifier": "PrimeBackupError", "path": "prime_backup/exceptions.py", "snippet": "class PrimeBackupError(Exception):\n\tpass" }, { "identifier": 
"BackupInfo", "path": "prime_backup/types/backup_info.py", "snippet": "class BackupInfo:\n\tid: int\n\ttimestamp_ns: int\n\tcreator: Operator\n\tcomment: str\n\ttargets: List[str]\n\ttags: BackupTags\n\n\traw_size: int # uncompressed size\n\tstored_size: int # actual size\n\n\tfiles: List['FileInfo']\n\n\[email protected]_property\n\tdef date(self) -> datetime.datetime:\n\t\treturn conversion_utils.timestamp_to_local_date(self.timestamp_ns)\n\n\[email protected]_property\n\tdef date_str(self) -> str:\n\t\treturn conversion_utils.timestamp_to_local_date_str(self.timestamp_ns)\n\n\t@classmethod\n\tdef of(cls, backup: schema.Backup, *, with_files: bool = False) -> 'Self':\n\t\t\"\"\"\n\t\tNotes: should be inside a session\n\t\t\"\"\"\n\t\tfrom prime_backup.types.file_info import FileInfo\n\t\treturn cls(\n\t\t\tid=backup.id,\n\t\t\ttimestamp_ns=backup.timestamp,\n\t\t\tcreator=Operator.of(backup.creator),\n\t\t\tcomment=backup.comment,\n\t\t\ttargets=list(backup.targets),\n\t\t\ttags=BackupTags(backup.tags),\n\t\t\traw_size=backup.file_raw_size_sum or 0,\n\t\t\tstored_size=backup.file_stored_size_sum or 0,\n\t\t\tfiles=list(map(FileInfo.of, backup.files)) if with_files else [],\n\t\t)" }, { "identifier": "BackupMeta", "path": "prime_backup/types/backup_meta.py", "snippet": "class BackupMeta(Serializable):\n\tcreator: str = str(Operator.pb('import'))\n\tcomment: str = ''\n\ttimestamp_ns: int\n\ttargets: List[str] = []\n\ttags: Dict[str, Any] = {}\n\n\tdef to_dict(self) -> dict:\n\t\treturn {\n\t\t\t'_version': 1,\n\t\t\t**self.serialize(),\n\t\t}\n\n\t@classmethod\n\tdef from_dict(cls, dt: dict) -> 'BackupMeta':\n\t\tversion = dt.get('_version', 0)\n\t\tif 'timestamp_ns' not in dt:\n\t\t\tdt['timestamp_ns'] = time.time_ns()\n\t\treturn cls.deserialize(dt)\n\n\t@classmethod\n\tdef get_default(cls: Type[Self]) -> Self:\n\t\tobj = super().get_default()\n\t\tobj.timestamp_ns = time.time_ns()\n\t\treturn obj\n\n\t@classmethod\n\tdef from_backup(cls, backup: 'schema.Backup') -> 'BackupMeta':\n\t\treturn cls(\n\t\t\tcreator=backup.creator,\n\t\t\tcomment=backup.comment,\n\t\t\ttimestamp_ns=backup.timestamp,\n\t\t\ttargets=list(backup.targets),\n\t\t\ttags=dict(backup.tags),\n\t\t)\n\n\tdef to_backup_kwargs(self) -> dict:\n\t\treturn dict(\n\t\t\tcreator=self.creator,\n\t\t\tcomment=self.comment,\n\t\t\ttimestamp=self.timestamp_ns,\n\t\t\ttargets=[t.rstrip('/') for t in self.targets],\n\t\t\ttags=self.tags.copy(),\n\t\t)" }, { "identifier": "StandaloneBackupFormat", "path": "prime_backup/types/standalone_backup_format.py", "snippet": "class StandaloneBackupFormat(enum.Enum):\n\ttar = TarFormat.plain\n\ttar_gz = TarFormat.gzip\n\ttar_bz2 = TarFormat.bz2\n\ttar_xz = TarFormat.lzma\n\ttar_zst = TarFormat.zstd\n\tzip = ZipFormat('.zip')\n\n\t@property\n\tdef __all_file_extensions(self) -> List[str]:\n\t\tif isinstance(self.value, TarFormat):\n\t\t\treturn self.value.value.all_extensions\n\t\telif isinstance(self.value, ZipFormat):\n\t\t\treturn self.value.all_extensions\n\t\telse:\n\t\t\traise ValueError(self.value)\n\n\t@classmethod\n\tdef from_file_name(cls, file: PathLike) -> Optional['StandaloneBackupFormat']:\n\t\tname = os.path.basename(file)\n\t\tfor ebf in StandaloneBackupFormat:\n\t\t\tfor ext in ebf.__all_file_extensions:\n\t\t\t\tif name.endswith(ext):\n\t\t\t\t\treturn ebf\n\t\treturn None" }, { "identifier": "TarFormat", "path": "prime_backup/types/tar_format.py", "snippet": "class TarFormat(enum.Enum):\n\tplain = _TarFormatItem('.tar', (), ':', CompressMethod.plain)\n\tgzip = 
_TarFormatItem('.tar.gz', ('.tgz',), ':gz', CompressMethod.plain)\n\tbz2 = _TarFormatItem('.tar.bz2', ('.tbz2',), ':bz2', CompressMethod.plain)\n\tlzma = _TarFormatItem('.tar.xz', ('.txz',), ':xz', CompressMethod.plain)\n\tzstd = _TarFormatItem('.tar.zst', ('.tar.zstd', '.tzst', '.tzstd'), ':', CompressMethod.zstd)" }, { "identifier": "ByteCount", "path": "prime_backup/types/units.py", "snippet": "class ByteCount(Quantity):\n\tdef __new__(cls, s: Union[int, float, str]):\n\t\tif isinstance(s, str) and len(s) > 0 and s[-1].lower() == 'b':\n\t\t\ts = s[:-1]\n\t\treturn super().__new__(cls, s)\n\n\t@classmethod\n\tdef _auto_format(cls, val) -> UnitValuePair:\n\t\tuv = super()._auto_format(val)\n\t\tif not uv.unit.endswith('B'):\n\t\t\tuv = UnitValuePair(uv.value, uv.unit + 'B')\n\t\treturn uv\n\n\t@classmethod\n\tdef _precise_format(cls, val) -> UnitValuePair:\n\t\tuv = super()._precise_format(val)\n\t\tif not uv.unit.endswith('B'):\n\t\t\tuv = UnitValuePair(uv.value, uv.unit + 'B')\n\t\treturn uv" }, { "identifier": "hash_utils", "path": "prime_backup/utils/hash_utils.py", "snippet": "def create_hasher(*, hash_method: Optional['HashMethod'] = None) -> 'Hasher':\ndef calc_reader_size_and_hash(\n\t\tfile_obj: IO[bytes], *,\n\t\tbuf_size: int = _READ_BUF_SIZE,\n\t\thash_method: Optional['HashMethod'] = None,\n) -> SizeAndHash:\ndef calc_file_size_and_hash(path: Path, **kwargs) -> SizeAndHash:\ndef calc_reader_hash(file_obj: IO[bytes], **kwargs) -> str:\ndef calc_file_hash(path: Path, **kwargs) -> str:\ndef calc_bytes_hash(buf: bytes) -> str:\n_READ_BUF_SIZE = 128 * 1024\nclass SizeAndHash(NamedTuple):" }, { "identifier": "blob_utils", "path": "prime_backup/utils/blob_utils.py", "snippet": "def get_blob_store() -> Path:\ndef get_blob_path(h: str) -> Path:\ndef iterate_blob_directories() -> Iterator[Path]:\ndef prepare_blob_directories():" }, { "identifier": "misc_utils", "path": "prime_backup/utils/misc_utils.py", "snippet": "T = TypeVar('T')\ndef assert_true(expr: bool, msg: Union[str, Callable[[], str]]):\ndef represent(obj: Any, *, attrs: Optional[dict] = None) -> str:\ndef ensure_type(value: Any, class_or_tuple: Union[Tuple[Type[T]], Type[T], Type]) -> T:\ndef make_thread_name(name: str) -> str:" }, { "identifier": "SizeAndHash", "path": "prime_backup/utils/hash_utils.py", "snippet": "class SizeAndHash(NamedTuple):\n\tsize: int\n\thash: str" } ]
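Aside: a stdlib-only analogue of the Compressor pattern in the snippet above: wrap the destination (or source) stream in a compressing (or decompressing) stream and copy in chunks, so no file is loaded into memory whole. gzip stands in for the pluggable methods and the size/hash bookkeeping of the real class is omitted:

import gzip
import shutil

def copy_compressed(src_path: str, dst_path: str) -> None:
    # source --[compress]--> destination, streamed via shutil.copyfileobj
    with open(src_path, "rb") as f_in, gzip.open(dst_path, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)

def copy_decompressed(src_path: str, dst_path: str) -> None:
    # source --[decompress]--> destination
    with gzip.open(src_path, "rb") as f_in, open(dst_path, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)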
import contextlib
import functools
import json
import os
import shutil
import stat
import tarfile
import threading
import time
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import ContextManager, IO, Optional, NamedTuple, List, Dict, Tuple
from prime_backup.action.create_backup_action_base import CreateBackupActionBase
from prime_backup.compressors import Compressor, CompressMethod
from prime_backup.config.config import Config
from prime_backup.constants import BACKUP_META_FILE_NAME
from prime_backup.db import schema
from prime_backup.db.access import DbAccess
from prime_backup.db.session import DbSession
from prime_backup.exceptions import PrimeBackupError
from prime_backup.types.backup_info import BackupInfo
from prime_backup.types.backup_meta import BackupMeta
from prime_backup.types.standalone_backup_format import StandaloneBackupFormat
from prime_backup.types.tar_format import TarFormat
from prime_backup.types.units import ByteCount
from prime_backup.utils import hash_utils, blob_utils, misc_utils
from prime_backup.utils.hash_utils import SizeAndHash
10,818
@contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: yield self.tar.extractfile(self.member) class TarFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, tar: tarfile.TarFile): self.tar = tar def get_member(self, path: str) -> Optional['TarBackupHandler.TarMember']: try: member = self.tar.getmember(path) except KeyError: return None else: return TarBackupHandler.TarMember(self.tar, member) def list_member(self) -> List['TarBackupHandler.TarMember']: return [TarBackupHandler.TarMember(self.tar, member) for member in self.tar.getmembers()] def __init__(self, tar_format: TarFormat): self.tar_format = tar_format @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[TarFileHolder]: compress_method = self.tar_format.value.compress_method if compress_method == CompressMethod.plain: with tarfile.open(path, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) else: # zstd stream does not support seek operation, sowe need to extract the tar into a temp path first, # then operate on it. requires extra spaces tho temp_file = Config.get().temp_path / 'import_{}_{}.tmp'.format(os.getpid(), threading.current_thread().ident) temp_file.parent.mkdir(parents=True, exist_ok=True) with contextlib.ExitStack() as exit_stack: exit_stack.callback(functools.partial(temp_file.unlink, missing_ok=True)) Compressor.create(compress_method).copy_decompressed(path, temp_file) with tarfile.open(temp_file, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) class ZipBackupHandler(PackedBackupFileHandler): class ZipMember(PackedBackupFileHandler.Member): def __init__(self, zipf: zipfile.ZipFile, member: zipfile.ZipInfo): self.zipf = zipf self.member = member mode = (self.member.external_attr >> 16) & 0xFFFF if mode == 0: if self.path.endswith('/'): mode = stat.S_IFDIR | 0o755 else: mode = stat.S_IFREG | 0o644 self.__mode = mode @property def mode(self) -> int: return self.__mode @property def path(self) -> str: return self.member.filename @property def uid(self) -> Optional[int]: return None @property def gid(self) -> Optional[int]: return None @property def mtime_ns(self) -> int: return int(time.mktime(self.member.date_time + (0, 0, -1)) * 1e9) def is_file(self) -> bool: return not self.is_dir() and stat.S_ISREG(self.mode) def is_dir(self) -> bool: return self.member.is_dir() def is_link(self) -> bool: return not self.is_dir() and stat.S_ISLNK(self.mode) def read_link(self) -> str: max_link_size = 10240 with self.open() as f: buf = f.read(max_link_size) if len(buf) == max_link_size: raise ValueError('symlink too large, read {} bytes, peek: {}'.format(len(buf), buf[:20])) return buf.decode('utf8') @contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: with self.zipf.open(self.member, 'r') as f: yield f class ZipFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, zipf: zipfile.ZipFile): self.zipf = zipf def get_member(self, path: str) -> Optional['ZipBackupHandler.ZipMember']: try: member = self.zipf.getinfo(path) except KeyError: return None else: return ZipBackupHandler.ZipMember(self.zipf, member) def list_member(self) -> List['ZipBackupHandler.ZipMember']: return [ZipBackupHandler.ZipMember(self.zipf, member) for member in self.zipf.infolist()] @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[ZipFileHolder]: with zipfile.ZipFile(path, 'r') as f: yield self.ZipFileHolder(f) class ImportBackupAction(CreateBackupActionBase):
class UnsupportedFormat(PrimeBackupError): pass class BackupMetadataNotFound(PrimeBackupError): pass class _FileDescription(NamedTuple): blob: Optional[schema.Blob] hash: str size: int class PackedBackupFileHandler(ABC): class Member(ABC): @property @abstractmethod def mode(self) -> int: ... @property @abstractmethod def path(self) -> str: ... @property @abstractmethod def uid(self) -> Optional[int]: ... @property @abstractmethod def gid(self) -> Optional[int]: ... @property @abstractmethod def mtime_ns(self) -> int: ... @abstractmethod def is_file(self) -> bool: ... @abstractmethod def is_dir(self) -> bool: ... @abstractmethod def is_link(self) -> bool: ... @abstractmethod def open(self) -> ContextManager[IO[bytes]]: ... @abstractmethod def read_link(self) -> str: ... class FileHolder(ABC): @abstractmethod def get_member(self, path: str) -> Optional['PackedBackupFileHandler.Member']: ... @abstractmethod def list_member(self) -> List['PackedBackupFileHandler.Member']: ... @abstractmethod def open_file(self, path: Path) -> ContextManager[FileHolder]: ... class TarBackupHandler(PackedBackupFileHandler): class TarMember(PackedBackupFileHandler.Member): def __init__(self, tar: tarfile.TarFile, member: tarfile.TarInfo): self.tar = tar self.member = member @property def mode(self) -> int: mode = self.member.mode & 0xFFFF if self.member.isfile(): mode |= stat.S_IFREG elif self.member.isdir(): mode |= stat.S_IFDIR elif self.member.issym(): mode |= stat.S_IFLNK else: raise NotImplementedError('not implemented for type {}'.format(self.member.type)) return mode @property def path(self) -> str: return self.member.path @property def uid(self) -> int: return self.member.uid @property def gid(self) -> int: return self.member.gid @property def mtime_ns(self) -> int: return self.member.mtime * 10 ** 9 def is_file(self) -> bool: return self.member.isfile() def is_dir(self) -> bool: return self.member.isdir() def is_link(self) -> bool: return self.member.issym() def read_link(self) -> str: return self.member.linkpath @contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: yield self.tar.extractfile(self.member) class TarFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, tar: tarfile.TarFile): self.tar = tar def get_member(self, path: str) -> Optional['TarBackupHandler.TarMember']: try: member = self.tar.getmember(path) except KeyError: return None else: return TarBackupHandler.TarMember(self.tar, member) def list_member(self) -> List['TarBackupHandler.TarMember']: return [TarBackupHandler.TarMember(self.tar, member) for member in self.tar.getmembers()] def __init__(self, tar_format: TarFormat): self.tar_format = tar_format @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[TarFileHolder]: compress_method = self.tar_format.value.compress_method if compress_method == CompressMethod.plain: with tarfile.open(path, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) else: # zstd stream does not support seek operation, sowe need to extract the tar into a temp path first, # then operate on it. 
requires extra spaces tho temp_file = Config.get().temp_path / 'import_{}_{}.tmp'.format(os.getpid(), threading.current_thread().ident) temp_file.parent.mkdir(parents=True, exist_ok=True) with contextlib.ExitStack() as exit_stack: exit_stack.callback(functools.partial(temp_file.unlink, missing_ok=True)) Compressor.create(compress_method).copy_decompressed(path, temp_file) with tarfile.open(temp_file, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) class ZipBackupHandler(PackedBackupFileHandler): class ZipMember(PackedBackupFileHandler.Member): def __init__(self, zipf: zipfile.ZipFile, member: zipfile.ZipInfo): self.zipf = zipf self.member = member mode = (self.member.external_attr >> 16) & 0xFFFF if mode == 0: if self.path.endswith('/'): mode = stat.S_IFDIR | 0o755 else: mode = stat.S_IFREG | 0o644 self.__mode = mode @property def mode(self) -> int: return self.__mode @property def path(self) -> str: return self.member.filename @property def uid(self) -> Optional[int]: return None @property def gid(self) -> Optional[int]: return None @property def mtime_ns(self) -> int: return int(time.mktime(self.member.date_time + (0, 0, -1)) * 1e9) def is_file(self) -> bool: return not self.is_dir() and stat.S_ISREG(self.mode) def is_dir(self) -> bool: return self.member.is_dir() def is_link(self) -> bool: return not self.is_dir() and stat.S_ISLNK(self.mode) def read_link(self) -> str: max_link_size = 10240 with self.open() as f: buf = f.read(max_link_size) if len(buf) == max_link_size: raise ValueError('symlink too large, read {} bytes, peek: {}'.format(len(buf), buf[:20])) return buf.decode('utf8') @contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: with self.zipf.open(self.member, 'r') as f: yield f class ZipFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, zipf: zipfile.ZipFile): self.zipf = zipf def get_member(self, path: str) -> Optional['ZipBackupHandler.ZipMember']: try: member = self.zipf.getinfo(path) except KeyError: return None else: return ZipBackupHandler.ZipMember(self.zipf, member) def list_member(self) -> List['ZipBackupHandler.ZipMember']: return [ZipBackupHandler.ZipMember(self.zipf, member) for member in self.zipf.infolist()] @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[ZipFileHolder]: with zipfile.ZipFile(path, 'r') as f: yield self.ZipFileHolder(f) class ImportBackupAction(CreateBackupActionBase):
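The code above defines PackedBackupFileHandler together with its tar and zip implementations. As a rough usage sketch only (the TarFormat member name is a guess, and TarBackupHandler is assumed to be importable from the module shown above), iterating over the members of a plain tar backup could look like this:

from pathlib import Path
from prime_backup.types.tar_format import TarFormat  # import path as shown in the record's import_statement

# TarBackupHandler / TarFileHolder / TarMember are the classes defined in the code above.
handler = TarBackupHandler(TarFormat.plain)  # "plain" is a hypothetical enum member name
with handler.open_file(Path('backup.tar')) as holder:
    for member in holder.list_member():
        print(member.path, oct(member.mode), member.mtime_ns)
        if member.is_file():
            with member.open() as f:
                data = f.read()  # raw bytes of the file inside the archive
        elif member.is_link():
            target = member.read_link()  # symlink target as a string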
def __init__(self, file_path: Path, backup_format: Optional[StandaloneBackupFormat] = None, *, ensure_meta: bool = True):
11
2023-11-28 19:03:36+00:00
16k
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n def __init__(self, headers=None, **kwargs):\n super(HTTPHeaderDict, self).__init__()\n self._container = OrderedDict()\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key, val):\n self._container[key.lower()] = [key, val]\n return self._container[key.lower()]\n\n def __getitem__(self, key):\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key):\n del self._container[key.lower()]\n\n def __contains__(self, key):\n return key.lower() in self._container\n\n def __eq__(self, other):\n if not isinstance(other, Mapping) and not hasattr(other, \"keys\"):\n return False\n if not isinstance(other, type(self)):\n other = type(self)(other)\n return dict((k.lower(), v) for k, v in self.itermerged()) == dict(\n (k.lower(), v) for k, v in other.itermerged()\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n if six.PY2: # Python 2\n iterkeys = MutableMapping.iterkeys\n itervalues = MutableMapping.itervalues\n\n __marker = object()\n\n def __len__(self):\n return len(self._container)\n\n def __iter__(self):\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def pop(self, key, default=__marker):\n \"\"\"D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \"\"\"\n # Using the MutableMapping function directly fails due to the private marker.\n # Using ordinary dict.pop would expose the internal structures.\n # So let's reinvent the wheel.\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def discard(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key, val):\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n \"\"\"\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = 
self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n vals.append(val)\n\n def extend(self, *args, **kwargs):\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n \"extend() takes at most 1 positional \"\n \"arguments ({0} given)\".format(len(args))\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, Mapping):\n for key in other:\n self.add(key, other[key])\n elif hasattr(other, \"keys\"):\n for key in other.keys():\n self.add(key, other[key])\n else:\n for key, value in other:\n self.add(key, value)\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n def getlist(self, key, default=__marker):\n \"\"\"Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n def _prepare_for_method_change(self):\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, dict(self.itermerged()))\n\n def _copy_from(self, other):\n for key in other:\n val = other.getlist(key)\n if isinstance(val, list):\n # Don't need to convert tuples\n val = list(val)\n self._container[key.lower()] = [key] + val\n\n def copy(self):\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self):\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self):\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self):\n return list(self.iteritems())\n\n @classmethod\n def from_httplib(cls, message): # Python 2\n \"\"\"Read headers from a Python 2 httplib message object.\"\"\"\n # python2.7 does not expose a proper API for exporting multiheaders\n # efficiently. This function re-reads raw lines from the message\n # object and extracts the multiheaders properly.\n obs_fold_continued_leaders = (\" \", \"\\t\")\n headers = []\n\n for line in message.headers:\n if line.startswith(obs_fold_continued_leaders):\n if not headers:\n # We received a header line that starts with OWS as described\n # in RFC-7230 S3.2.4. 
This indicates a multiline header, but\n # there exists no previous header to which we can attach it.\n raise InvalidHeader(\n \"Header continuation with no previous header: %s\" % line\n )\n else:\n key, value = headers[-1]\n headers[-1] = (key, value + \" \" + line.strip())\n continue\n\n key, value = line.split(\":\", 1)\n headers.append((key, value.strip()))\n\n return cls(headers)" }, { "identifier": "RecentlyUsedContainer", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(MutableMapping):\n \"\"\"\n Provides a thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n ContainerCls = OrderedDict\n\n def __init__(self, maxsize=10, dispose_func=None):\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n\n self._container = self.ContainerCls()\n self.lock = RLock()\n\n def __getitem__(self, key):\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key, value):\n evicted_value = _Null\n with self.lock:\n # Possibly evict the existing value of 'key'\n evicted_value = self._container.get(key, _Null)\n self._container[key] = value\n\n # If we didn't evict an existing value, we might have to evict the\n # least recently used item from the beginning of the container.\n if len(self._container) > self._maxsize:\n _key, evicted_value = self._container.popitem(last=False)\n\n if self.dispose_func and evicted_value is not _Null:\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key):\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self):\n with self.lock:\n return len(self._container)\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self):\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(itervalues(self._container))\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self):\n with self.lock:\n return list(iterkeys(self._container))" }, { "identifier": "HTTPConnectionPool", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/connectionpool.py", "snippet": "class ConnectionPool(object):\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host, port=None):\n def __str__(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def close(self):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n _proxy_config=None,\n **conn_kw\n ):\n def _new_conn(self):\n def _get_conn(self, timeout=None):\n def _put_conn(self, conn):\n def _validate_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _get_timeout(self, timeout):\n def _raise_timeout(self, err, url, timeout_value):\n def _make_request(\n self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw\n 
):\n def _absolute_url(self, path):\n def close(self):\n def is_same_host(self, url):\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n assert_same_host=True,\n timeout=_Default,\n pool_timeout=None,\n release_conn=None,\n chunked=False,\n body_pos=None,\n **response_kw\n ):\n def _is_ssl_error_message_from_http_proxy(ssl_error):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n ssl_version=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n **conn_kw\n ):\n def _prepare_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _new_conn(self):\n def _validate_conn(self, conn):\ndef connection_from_url(url, **kw):\ndef _normalize_host(host, scheme):\ndef _close_pool_connections(pool):" }, { "identifier": "LocationValueError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"\n\n pass" }, { "identifier": "MaxRetryError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme):\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = (\n \"Proxy URL had unsupported scheme %s, should use http:// or https://\"\n % scheme\n )\n super(ProxySchemeUnknown, self).__init__(message)" }, { "identifier": "ProxySchemeUnsupported", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnsupported(ValueError):\n \"\"\"Fetching HTTPS resources through HTTPS proxies is unsupported\"\"\"\n\n pass" }, { "identifier": "URLSchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme):\n message = \"Not supported URL scheme %s\" % scheme\n super(URLSchemeUnknown, self).__init__(message)\n\n self.scheme = scheme" }, { "identifier": "six", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/packages/six.py", "snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] 
>= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):" }, { "identifier": "RequestMethods", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/request.py", "snippet": "class RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of 
request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **kw\n ): # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n urlopen_kw[\"request_url\"] = url\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method,\n url,\n fields=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **urlopen_kw\n ):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. 
For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": {}}\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields),\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"] = {\"Content-Type\": content_type}\n\n extra_kw[\"headers\"].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "connection_requires_http_tunnel", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url=None, proxy_config=None, destination_scheme=None\n):\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. (i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/retry.py", "snippet": "class Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. 
Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. 
The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. 
Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
elif error and self._is_read_error(error):\n # Read retry?\n if read is False or not self._is_method_retryable(method):\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n redirect_location = response.get_redirect_location()\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n return new_retry\n\n def __repr__(self):\n return (\n \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n ).format(cls=type(self), self=self)\n\n def __getattr__(self, item):\n if item == \"method_whitelist\":\n # TODO: Remove this deprecated alias in v2.0\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n return self.allowed_methods\n try:\n return getattr(super(Retry, self), item)\n except AttributeError:\n return getattr(Retry, item)" }, { "identifier": "parse_url", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/url.py", "snippet": "def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. 
Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not SCHEME_RE.search(url):\n url = \"//\" + url\n\n try:\n scheme, authority, path, query, fragment = URI_RE.match(url).groups()\n normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups()\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port = int(port)\n if not (0 <= port <= 65535):\n raise LocationParseError(url)\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)\n\n except (ValueError, AttributeError):\n return six.raise_from(LocationParseError(source_url), None)\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n # Ensure that each part of the URL is a `str` for\n # backwards compatibility.\n if isinstance(url, six.text_type):\n ensure_func = six.ensure_text\n else:\n ensure_func = six.ensure_str\n\n def ensure_type(x):\n return x if x is None else ensure_func(x)\n\n return Url(\n scheme=ensure_type(scheme),\n auth=ensure_type(auth),\n host=ensure_type(host),\n port=port,\n path=ensure_type(path),\n query=ensure_type(query),\n fragment=ensure_type(fragment),\n )" } ]
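The Retry snippet quoted in the context above documents the exponential backoff as {backoff factor} * (2 ** (consecutive retries - 1)), capped at DEFAULT_BACKOFF_MAX. A small worked sketch of that arithmetic (plain Python that mirrors get_backoff_time rather than calling urllib3):

BACKOFF_FACTOR = 0.1
DEFAULT_BACKOFF_MAX = 120  # value shown in the Retry snippet above

def backoff_time(consecutive_errors: int) -> float:
    # get_backoff_time() returns 0 until there are at least two consecutive errors
    if consecutive_errors <= 1:
        return 0.0
    return min(DEFAULT_BACKOFF_MAX, BACKOFF_FACTOR * (2 ** (consecutive_errors - 1)))

print([backoff_time(n) for n in range(1, 6)])  # [0.0, 0.2, 0.4, 0.8, 1.6]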
import collections
import functools
import logging

from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
    LocationValueError,
    MaxRetryError,
    ProxySchemeUnknown,
    ProxySchemeUnsupported,
    URLSchemeUnknown,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.url import parse_url
13,339
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
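The code above ends with key_fn_by_scheme, which maps a scheme to a partial of _default_key_normalizer. A minimal sketch of what that normalizer produces for a request context, using only names defined in the snippet (the module-level import is assumed to match urllib3 1.26.x):

from urllib3.poolmanager import key_fn_by_scheme  # assumed import location for the dict above

context = {
    "scheme": "HTTPS",
    "host": "Example.COM",
    "port": 443,
    "headers": {"User-Agent": "demo"},
}
key = key_fn_by_scheme["https"](context)  # -> _default_key_normalizer(PoolKey, context)
# scheme and host are lower-cased, dict-valued fields become frozensets,
# and every _key_fields entry not present in the context is filled with None
assert key.key_scheme == "https" and key.key_host == "example.com"
assert key.key_port == 443 and key.key_headers == frozenset({("User-Agent", "demo")})
assert key.key_timeout is None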
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
2
2023-11-27 07:01:39+00:00
16k
IanYeung/MGLD-VSR
ldm/models/diffusion/ddpm_inv.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n pass\n # assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n pass\n # assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):\n sd = torch.load(path, map_location=\"cpu\")\n if \"state_dict\" in list(sd.keys()):\n sd = sd[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n if 'first_stage_model' in k:\n sd[k[18:]] = sd[k]\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(\n sd, strict=False)\n print(f\"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys\")\n if len(missing) > 0:\n print(f\"Missing Keys: {missing}\")\n # if len(unexpected) > 0:\n # print(f\"Unexpected Keys: {unexpected}\")\n\n def encode(self, x, return_encfea=False):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n if return_encfea:\n return posterior, moments\n return posterior\n\n def encode_gt(self, x, new_encoder):\n h = new_encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior, moments\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n x = x.to(memory_format=torch.contiguous_format).float()\n # x = x*2.0-1.0\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n 
self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n # log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = 
make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def q_sample(self, x_start, t, noise=None, ddim_num_steps=200):\n self.make_schedule(ddim_num_steps=ddim_num_steps)\n noise = default(noise, lambda: torch.randn_like(x_start))\n return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec\n\n\n @torch.no_grad()\n def p_sample_ddim_sr(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, struct_c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current 
prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def decode_sr(self, x_latent, cond, struct_cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim_sr(x_dec, cond, struct_cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec\n\n @torch.no_grad()\n def sample_sr(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n struct_cond=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n _, C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling_sr(conditioning, struct_cond, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling_sr(self, cond, struct_cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim_sr(img, cond, struct_cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim_sr(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, struct_c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n\n @torch.no_grad()\n def sample_sr_t(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n struct_cond=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n _, C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling_sr_t(conditioning, struct_cond, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling_sr_t(self, cond, struct_cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n # timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else sorted(set(space_timesteps(1000, [self.ddim_timesteps.shape[0]])))\n timesteps = np.array(timesteps)\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim_sr_t(img, cond, struct_cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim_sr_t(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n struct_c_t = self.model.structcond_stage_model(struct_c, t)\n e_t = self.model.apply_model(x, t, c, struct_c_t)\n else:\n assert NotImplementedError\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" } ]
import torch import torch.nn as nn import os import numpy as np import pytorch_lightning as pl from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
13442
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., embedding_reg_weight=0., unfreeze_model=False, model_lr=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., embedding_reg_weight=0., unfreeze_model=False, model_lr=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
self.model_ema = LitEma(self.model)
8
2023-11-30 01:50:29+00:00
16k
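As a rough illustration of how rows shaped like the one above can be consumed, here is a minimal Python sketch for scoring next-line completion: it joins the retrieved context snippets (the "identifier"/"path"/"snippet" entries) onto the in-file code prefix and checks a model's first predicted line against the reference next line. The key names ("context", "cropped_code", "next_line"), the JSONL path, and the predict_next_line stub are assumptions made for illustration only, not part of the dataset itself.

# Illustrative sketch only: the key names below are assumed, and
# predict_next_line is a placeholder for whatever completion model is evaluated.
import json

def predict_next_line(prompt: str) -> str:
    # Placeholder model: returns an empty guess.
    return ""

def evaluate(jsonl_path: str) -> float:
    exact, total = 0, 0
    with open(jsonl_path) as f:
        for line in f:
            row = json.loads(line)
            # Prepend the retrieved cross-file snippets to the in-file prefix.
            context = "\n".join(c["snippet"] for c in row["context"])
            prompt = context + "\n" + row["cropped_code"]
            guess = predict_next_line(prompt).strip()
            exact += int(guess == row["next_line"].strip())
            total += 1
    return exact / max(total, 1)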
Institute4FutureHealth/CHA
tasks/types.py
[ { "identifier": "ActivityAnalysis", "path": "tasks/affect/activity_analysis.py", "snippet": "class ActivityAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw activity affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_activity_analysis\"\n chat_name: str = \"AffectActivityAnalysis\"\n description: str = (\n \"Analyze the physical activity data. You must Call this whenever physical activity analysis\"\n \"(e.g., 'average', 'sum', or 'trend') is needed. DON'T rely on your analysis.\"\n \"For example, if the user asks for trends (or variations) in data, you must call this task\"\n )\n dependencies: List[str] = [\"affect_activity_get\"]\n inputs: List[str] = [\n \"It is an string but in json format. It is the output of the $affect_activity_get$\",\n \"analysis_type. It can be one of [$average$, $sum$, $trend$].\",\n ]\n outputs: List[str] = [\n (\n \"The analysis result for steps_count. Look for analysis_type to find the type of analysis. \"\n \"steps_count is the total number of steps registered during the day.\"\n ),\n (\n \"The analysis result for rest_time. Look for analysis_type to find the type of analysis. \"\n \"rest_time is the time (in minutes) during the day spent resting, i.e. sleeping or lying down.\"\n ),\n (\n \"The analysis result for inactive_time. Look for analysis_type to find the type of analysis. \"\n \"inactive_time is the time (in minutes) during the day spent resting, i.e. sitting or standing still.\"\n ),\n (\n \"The analysis result for low_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"low_acitivity_time is the (in minutes) during the day with low intensity activity (e.g. household work).\"\n ),\n (\n \"The analysis result for medimum_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"medimum_acitivity_time is the (in minutes) during the day with medium intensity activity (e.g. walking).\"\n ),\n (\n \"The analysis result for high_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"high_acitivity_time is the (in minutes) during the day with high intensity activity (e.g. running).\"\n ),\n ]\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n if len(inputs) == 0:\n return \"\"\n\n df = pd.read_json(\n StringIO(inputs[0][\"data\"].strip()), orient=\"records\"\n )\n analysis_type = inputs[1].strip()\n if analysis_type == \"average\":\n df = df.drop(\"date\", axis=1) # No average for date!\n df = df.mean().to_frame().T\n elif analysis_type == \"sum\":\n df = df.drop(\"date\", axis=1) # No sum for date!\n df = df.sum().to_frame().T\n elif analysis_type == \"trend\":\n df = self._calculate_slope(df)\n else:\n raise ValueError(\n \"The input analysis type has not been defined!\"\n )\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "ActivityGet", "path": "tasks/affect/activity_get.py", "snippet": "class ActivityGet(Affect):\n \"\"\"\n **Description:**\n\n This tasks gets activity affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_activity_get\"\n chat_name: str = \"AffectActivityGet\"\n description: str = (\n \"Get the physical activity parameters for a specific date or \"\n \"a period (if two dates are provided). 
\"\n \"You must Call $affect_analysis$ whenever physical activity \"\n \"analysis (e.g., 'average', 'sum', or 'trend') is needed. DON'T rely on your analysis\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"user ID in string. It can be refered as user, patient, individual, etc. Start with 'par_' following with a number (e.g., 'par_1').\",\n \"start date of the physical activity data in string with the following format: '%Y-%m-%d'\",\n (\n \"end date of the physical activity data in string with the following format: '%Y-%m-%d'.\"\n \"If there is no end date, the value should be an empty string (i.e., '')\"\n ),\n ]\n outputs: List[str] = [\n \"steps_count is the total number of steps registered during the day.\",\n \"rest_time is the time (in minutes) during the day spent resting, i.e. sleeping or lying down.\",\n \"inactive_time is the time (in minutes) during the day spent resting, i.e. sitting or standing still.\",\n \"low_acitivity_time is the (in minutes) during the day with low intensity activity (e.g. household work).\",\n \"medimum_acitivity_time is the (in minutes) during the day with medium intensity activity (e.g. walking).\",\n \"high_acitivity_time is the (in minutes) during the day with high intensity activity (e.g. running).\",\n ]\n\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = True\n #\n file_name: str = \"activity.csv\"\n device_name: str = \"oura\"\n local_dir: str = \"data/affect\"\n\n columns_to_keep: List[str] = [\n \"date\",\n \"steps\",\n \"rest\",\n \"inactive\",\n \"low\",\n \"medium\",\n \"high\",\n ]\n columns_revised: List[str] = [\n \"date\",\n \"steps_count\",\n \"rest_time\",\n \"inactive_time\",\n \"low_acitivity_time\",\n \"medimum_acitivity_time\",\n \"high_acitivity_time\",\n ]\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n user_id = inputs[0].strip()\n full_dir = os.path.join(\n self.local_dir, user_id, self.device_name\n )\n df = self._get_data(\n local_dir=full_dir,\n file_name=self.file_name,\n start_date=inputs[1].strip(),\n end_date=inputs[2].strip(),\n usecols=self.columns_to_keep,\n )\n df.columns = self.columns_revised\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "SleepAnalysis", "path": "tasks/affect/sleep_analysis.py", "snippet": "class SleepAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw sleep affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_sleep_analysis\"\n chat_name: str = \"AffectSleepAnalysis\"\n description: str = (\n \"Performs trend or average analysis on the provided sleep data. You must Call this whenever sleep trend or average is needed.\"\n \"For example, if the user asks for trends (or variations) in data, you must call this task\"\n )\n dependencies: List[str] = [\"affect_sleep_get\"]\n inputs: List[str] = [\n \"datapipe key to the data\",\n \"analysis_type. It can be one of [average, trend].\",\n ]\n outputs: List[str] = [\n (\n \"The analysis result for total_sleep_time. Look for analysis_type to find the type of analysis. \"\n \"total_sleep_time (in minutes) is Total amount of sleep (a.k.a. sleep duration) registered during the sleep period.\"\n ),\n (\n \"The analysis result for awake_duration. Look for analysis_type to find the type of analysis. 
\"\n \"awake_duration (in minutes) is the total amount of awake time registered during the sleep period.\"\n ),\n (\n \"The analysis result for light_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"light_sleep_duration (in minutes) is the total amount of light (N1 or N2) sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for rem_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"rem_sleep_duration (in minutes) is the total amount of REM sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for deep_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"deep_sleep_duration (in minutes) is the total amount of deep (N3) sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for sleep_onset_latency. Look for analysis_type to find the type of analysis. sleep_onset_latency (in minutes) \"\n \"is the detected latency from bedtime_start to the beginning of the first five minutes of persistent sleep.\"\n ),\n (\n \"The analysis result for midpoint_time_of_sleep. Look for analysis_type to find the type of analysis. \"\n \"midpoint_time_of_sleep (in minutes) is the time from the start of sleep to the midpoint of sleep. The midpoint ignores awake periods.\"\n ),\n (\n \"The analysis result for sleep_efficiency. Look for analysis_type to find the type of analysis. \"\n \"sleep_efficiency is the percentage of the sleep period spent asleep (100% * sleep duration / time in bed).\"\n ),\n (\n \"The analysis result for average_heart_rate. Look for analysis_type to find the type of analysis. \"\n \"average_heart_rate is the average heart rate registered during the sleep period.\"\n ),\n (\n \"The analysis result for minimum_heart_rate. Look for analysis_type to find the type of analysis. \"\n \"minimum_heart_rate is the lowest heart rate (5 minutes sliding average) registered during the sleep period.\"\n ),\n (\n \"The analysis result for rmssd. Look for analysis_type to find the type of analysis. \"\n \"rmssd is the average Root Mean Square of Successive Differences (RMSSD) registered during the sleep period.\"\n ),\n (\n \"The analysis result for average_breathing_rate. Look for analysis_type to find the type of analysis. \"\n \"average_breathing_rate is the average breathing rate registered during the sleep period.\"\n ),\n (\n \"The analysis result for temperature_variation. Look for analysis_type to find the type of analysis. 
\"\n \"temperature_variation is the skin temperature deviation from the long-term temperature average.\"\n ),\n ]\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = True\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n df = pd.read_json(\n StringIO(inputs[0][\"data\"].strip()), orient=\"records\"\n )\n analysis_type = inputs[1].strip()\n if analysis_type == \"average\":\n df = df.drop(\"date\", axis=1) # No average for date!\n df = df.mean().to_frame().T\n elif analysis_type == \"trend\":\n df = self._calculate_slope(df)\n else:\n raise ValueError(\n \"The input analysis type has not been defined!\"\n )\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "SleepGet", "path": "tasks/affect/sleep_get.py", "snippet": "class SleepGet(Affect):\r\n \"\"\"\r\n **Description:**\r\n\r\n This tasks gets sleep affect data for specific patient.\r\n \"\"\"\r\n\r\n name: str = \"affect_sleep_get\"\r\n chat_name: str = \"AffectSleepGet\"\r\n description: str = (\r\n \"Get the sleep parameters for a specific date or \"\r\n \"a period (if two dates are provided). \"\r\n \"You must Call $affect_sleep_analysis$ whenever sleep \"\r\n \"analysis (e.g., 'average' or 'trend') is needed. DON'T rely on your analysis\"\r\n )\r\n dependencies: List[str] = []\r\n inputs: List[str] = [\r\n \"user ID in string. It can be refered as user, patient, individual, etc. Start with 'par_' following with a number (e.g., 'par_1').\",\r\n \"start date of the sleep data in string with the following format: '%Y-%m-%d'\",\r\n (\r\n \"end date of the sleep data in string with the following format: '%Y-%m-%d'. \"\r\n \"If there is no end date, the value should be an empty string (i.e., '')\"\r\n ),\r\n ]\r\n outputs: List[str] = [\r\n \"total_sleep_time (in minutes) is Total amount of sleep (a.k.a. sleep duration) registered during the sleep period.\",\r\n \"awake_duration (in minutes) is the total amount of awake time registered during the sleep period.\",\r\n \"light_sleep_duration (in minutes) is the total amount of light (N1 or N2) sleep registered during the sleep period.\",\r\n \"rem_sleep_duration (in minutes) is the total amount of REM sleep registered during the sleep period.\",\r\n \"deep_sleep_duration (in minutes) is the total amount of deep (N3) sleep registered during the sleep period.\",\r\n \"sleep_onset_latency (in minutes) is detected latency from bedtime_start to the beginning of the first five minutes of persistent sleep.\",\r\n \"midpoint_time_of_sleep (in minutes) is the time from the start of sleep to the midpoint of sleep. 
The midpoint ignores awake periods.\",\r\n \"sleep_efficiency is the percentage of the sleep period spent asleep (100% * sleep duration / time in bed).\",\r\n \"average_heart_rate is the average heart rate registered during the sleep period.\",\r\n \"minimum_heart_rate is the lowest heart rate (5 minutes sliding average) registered during the sleep period.\",\r\n \"rmssd is the average Root Mean Square of Successive Differences (RMSSD) registered during the sleep period.\",\r\n \"average_breathing_rate is the average breathing rate registered during the sleep period.\",\r\n \"temperature_variation is the skin temperature deviation from the long-term temperature average.\",\r\n ]\r\n # False if the output should directly passed back to the planner.\r\n # True if it should be stored in datapipe\r\n output_type: bool = True\r\n #\r\n file_name: str = \"sleep.csv\"\r\n device_name: str = \"oura\"\r\n local_dir: str = \"data/affect\"\r\n columns_to_keep: List[str] = [\r\n \"date\",\r\n \"total\",\r\n \"awake\",\r\n \"light\",\r\n \"rem\",\r\n \"deep\",\r\n \"onset_latency\",\r\n \"midpoint_time\",\r\n \"efficiency\",\r\n \"hr_average\",\r\n \"hr_lowest\",\r\n \"rmssd\",\r\n \"breath_average\",\r\n \"temperature_delta\",\r\n ]\r\n columns_revised: List[str] = [\r\n \"date\",\r\n \"total_sleep_time\",\r\n \"awake_duration\",\r\n \"light_sleep_duration\",\r\n \"rem_sleep_duration\",\r\n \"deep_sleep_duration\",\r\n \"sleep_onset_latency\",\r\n \"midpoint_time_of_sleep\",\r\n \"sleep_efficiency\",\r\n \"average_heart_rate\",\r\n \"minimum_heart_rate\",\r\n \"rmssd\",\r\n \"average_breathing_rate\",\r\n \"temperature_variation\",\r\n ]\r\n variables_in_seconds: List[str] = [\r\n \"total_sleep_time\",\r\n \"awake_duration\",\r\n \"light_sleep_duration\",\r\n \"rem_sleep_duration\",\r\n \"deep_sleep_duration\",\r\n \"sleep_onset_latency\",\r\n \"midpoint_time_of_sleep\",\r\n ]\r\n\r\n def _execute(\r\n self,\r\n inputs: List[Any],\r\n ) -> str:\r\n user_id = inputs[0].strip()\r\n full_dir = os.path.join(\r\n self.local_dir, user_id, self.device_name\r\n )\r\n df = self._get_data(\r\n local_dir=full_dir,\r\n file_name=self.file_name,\r\n start_date=inputs[1].strip(),\r\n end_date=inputs[2].strip(),\r\n usecols=self.columns_to_keep,\r\n )\r\n df.columns = self.columns_revised\r\n df = self._convert_seconds_to_minutes(\r\n df, self.variables_in_seconds\r\n )\r\n df = df.round(2)\r\n json_out = df.to_json(orient=\"records\")\r\n return json_out\r" }, { "identifier": "AskUser", "path": "tasks/ask_user.py", "snippet": "class AskUser(BaseTask):\n \"\"\"\n **Description:**\n\n This task is asking question back to the user and stops planning. When needed, the planner will decide to ask question from user\n and use the user's answer to proceed to the planning.\n\n \"\"\"\n\n name: str = \"ask_user\"\n chat_name: str = \"AskUser\"\n description: str = (\n \"Ask user to provide more information or directly answer user's question. \"\n \"You should try your best using other tools before calling this tool.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"The text returned to user. 
It should be relevant and very detailed based on the latest user's Question.\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n return_direct: bool = True\n\n translator: Any = None #: :meta private:\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"Translate query\"\"\"\n if inputs is None:\n return \"\"\n return inputs[0]\n\n def explain(\n self,\n ) -> str:\n return \"This task simply asks user to provide more information or continue interaction.\"" }, { "identifier": "GoogleTranslate", "path": "tasks/google_translator.py", "snippet": "class GoogleTranslate(BaseTask):\n \"\"\"\n **Description:**\n\n This task uses google translate to autmatically convert from the user language to english or vise versa.\n\n \"\"\"\n\n name: str = \"google_translator\"\n chat_name: str = \"GoogleTranslator\"\n description: str = (\n \"Translates queries between different languages.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"text to be translated\",\n \"destination language\",\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n translator: Any = None #: :meta private:\n\n @model_validator(mode=\"before\")\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"\n Validate that api key and python package exists in environment.\n\n Args:\n cls (object): The class itself.\n values (Dict): The dictionary containing the values for validation.\n Return:\n Dict:The original values.\n Raise:\n ImportError: If the 'playwright' package is not installed.\n\n\n \"\"\"\n\n try:\n from googletrans import Translator\n\n values[\"translator\"] = Translator()\n except ImportError:\n raise ValueError(\n \"Could not import googletrans python package. \"\n \"Please install it with `pip install googletrans-py`.\"\n )\n return values\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parse the input string into a list of strings.\n\n Args:\n input (str): Input string to be parsed.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n return input_args.split(\"$#\")\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task.\n\n Args:\n input (str): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n if len(inputs) < 2:\n return \"\", \"\"\n dest = inputs[1] if inputs[1] is not None else \"en\"\n result = self.translator.translate(inputs[0], dest=dest)\n return result.text, result.src\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n \"\"\"\n\n return \"This task uses google translate to translate between languages\"" }, { "identifier": "Click", "path": "tasks/playwright/click.py", "snippet": "class Click(BaseBrowser):\n \"\"\"\n **Description:**\n\n This code defines a class named Click that inherits from the BaseBrowser class.\n The Click class represents a task related to browser interactions, specifically clicking on an element\n identified by a CSS selector using the Playwright library.\n\n \"\"\"\n\n name: str = \"click\"\n chat_name: str = \"Clicker\"\n description: str = (\n \"Click on an element with the given CSS selector\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"CSS selector for the element to click\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def _selector_effective(self, selector: str) -> str:\n 
\"\"\"\n Get the effective CSS selector considering visibility.\n\n Args:\n selector (str): The original CSS selector.\n Return:\n str: The effective CSS selector.\n\n \"\"\"\n\n if not self.visible_only:\n return selector\n return f\"{selector} >> visible=1\"\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the click task by clicking on an element with the provided CSS selector.\n\n Aegs:\n input (str): The input string containing the CSS selector.\n Return:\n str: A message indicating the success or failure of the click operation.\n\n \"\"\"\n selector = inputs[0]\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n # Navigate to the desired webpage before using this tool\n selector_effective = self._selector_effective(\n selector=selector\n )\n from playwright.sync_api import (\n TimeoutError as PlaywrightTimeoutError,\n )\n\n try:\n page.click(\n selector_effective,\n strict=self.playwright_strict,\n timeout=self.playwright_timeout,\n )\n except PlaywrightTimeoutError:\n return f\"Unable to click on element '{selector}'\"\n return f\"Clicked element '{selector}'\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the purpose of the click task.\n\n Return:\n str: A brief explanation of the task.\n\n \"\"\"\n\n return \"This task clicks on an element in an specific url\"" }, { "identifier": "CurrentWebPage", "path": "tasks/playwright/current_page.py", "snippet": "class CurrentWebPage(BaseBrowser):\n \"\"\"\n **Description:**\n\n This code defines a class named CurrentWebPage that inherits from the BaseBrowser class.\n The CurrentWebPage class represents a task related to browser interactions, specifically retrieving the URL of the current web page.\n\n \"\"\"\n\n name: str = \"current_page\"\n chat_name: str = \"CurrentPage\"\n description: str = \"Returns the URL of the current page\"\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the task by retrieving the current page from the synchronous browser using\n the get_current_page function and returning its URL.\n\n Args:\n input (str): The input string (not used in this task).\n Return:\n str: The URL of the current web page.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n return str(page.url)\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provides a brief explanation of the current_page task.\n\n Return:\n str: An explanation of the task.\n\n \"\"\"\n\n return \"This task returns the ulr of the current page.\"" }, { "identifier": "ExtractHyperlinks", "path": "tasks/playwright/extract_hyperlinks.py", "snippet": "class ExtractHyperlinks(BaseBrowser):\n \"\"\"\n **Description:**\n\n This task extracts all hyperlinks from the current webpage.\n \"\"\"\n\n name: str = \"extract_hyperlinks\"\n chat_name: str = \"ExtractHyperLinks\"\n description: str = \"Extract all hyperlinks on the current webpage\"\n dependencies: List[str] = []\n inputs: List[str] = [\n \"Boolean: True/False. 
Return absolute URLs instead of relative URLs.\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n @model_validator(mode=\"before\")\n def check_bs_import(cls, values: dict) -> dict:\n \"\"\"\n Check that the arguments are valid.\n\n Args:\n values (Dict): The current attribute values.\n Return:\n Dict: The updated attribute values.\n Raise:\n ImportError: If 'beautifulsoup4' package is not installed.\n\n \"\"\"\n\n try:\n from bs4 import BeautifulSoup # noqa: F401\n except ImportError:\n raise ImportError(\n \"The 'beautifulsoup4' package is required to use this tool.\"\n \" Please install it with 'pip install beautifulsoup4'.\"\n )\n return values\n\n @staticmethod\n def scrape_page(\n page: Any, html_content: str, absolute_urls: bool\n ) -> str:\n \"\"\"\n Scrape hyperlinks from the current webpage.\n\n Args:\n page (Any): The current webpage.\n html_content (str): The HTML content of the webpage.\n absolute_urls (bool): True if absolute URLs should be returned, False otherwise.\n Return:\n str: JSON string containing the extracted hyperlinks.\n\n\n \"\"\"\n\n from urllib.parse import urljoin\n from bs4 import BeautifulSoup\n\n # Parse the HTML content with BeautifulSoup\n soup = BeautifulSoup(html_content, \"lxml\")\n\n # Find all the anchor elements and extract their href attributes\n anchors = soup.find_all(\"a\")\n if absolute_urls:\n base_url = page.url\n links = [\n urljoin(base_url, anchor.get(\"href\", \"\"))\n for anchor in anchors\n ]\n else:\n links = [anchor.get(\"href\", \"\") for anchor in anchors]\n # Return the list of links as a JSON string\n return json.dumps(links)\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the ExtractHyperlinks task.\n\n Args:\n input (str): The input parameter for the task.\n Return:\n str: JSON string containing the extracted hyperlinks.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n html_content = page.content()\n return self.scrape_page(page, html_content, inputs[0])\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a brief explanation of the ExtractHyperlinks task.\n\n Return:\n str: An explanation of the task.\n\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "ExtractText", "path": "tasks/playwright/extract_text.py", "snippet": "class ExtractText(BaseBrowser):\n \"\"\"\n **Description:**\n\n This task extracts all the text from the current webpage.\n \"\"\"\n\n name: str = \"extract_text\"\n chat_name: str = \"ExtractText\"\n description: str = \"Extract all the text on the current webpage\"\n dependencies: List[str] = [\"navigate\"]\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n @model_validator(mode=\"before\")\n def check_acheck_bs_importrgs(cls, values: dict) -> dict:\n \"\"\"\n Check that the arguments are valid.\n\n Args:\n values (Dict): The current attribute values.\n Return:\n Dict: The updated attribute values.\n Raise:\n ImportError: If 'beautifulsoup4' or 'lxml' packages are not installed.\n\n \"\"\"\n\n try:\n from bs4 import BeautifulSoup # noqa: F401\n except ImportError:\n raise ImportError(\n \"The 'beautifulsoup4' package is required to use this tool.\"\n \" Please install it with 'pip install beautifulsoup4'.\"\n )\n\n try:\n import lxml # noqa: F401\n except ImportError:\n raise 
ImportError(\n \"The 'lxml' package is required to use this tool.\"\n \" Please install it with 'pip install lxml'.\"\n )\n return values\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the ExtractText task.\n\n Args:\n input (str): The input parameter for the task.\n Return:\n str: The extracted text from the current webpage.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n from bs4 import BeautifulSoup\n\n self.validate_url(inputs[0].strip())\n\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n\n page = get_current_page(self.sync_browser)\n response = page.goto(inputs[0])\n status = response.status if response else \"unknown\"\n\n if status == 200:\n html_content = page.content()\n # Parse the HTML content with BeautifulSoup\n soup = BeautifulSoup(html_content, \"lxml\")\n\n return \" \".join(text for text in soup.stripped_strings)\n else:\n return (\n \"Error extracting text. The url is wrong. Try again.\"\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the ExtractText task.\n\n Return:\n str: A brief explanation of the ExtractText task.\n\n\n \"\"\"\n\n return \"This task returns the ulr of the current page.\"" }, { "identifier": "GetElements", "path": "tasks/playwright/get_elements.py", "snippet": "class GetElements(BaseBrowser):\n \"\"\"\n **Description:**\n\n The GetElements class is a subclass of BaseBrowser responsible for retrieving elements\n on the current web page that match a given CSS selector.\n \"\"\"\n\n name: str = \"get_elements\"\n chat_name: str = \"GetElements\"\n description: str = \"Retrieve elements in the current web page matching the given CSS selector\"\n dependencies: List[str] = []\n inputs: List[str] = [\n \"CSS selector, such as '*', 'div', 'p', 'a', #id, .classname\",\n \"Set of attributes to retrieve for each element\",\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n def _get_elements(\n page: SyncPage, selector: str, attributes: Sequence[str]\n ) -> List[dict]:\n \"\"\"\n Get elements matching the given CSS selector.\n\n Args:\n page (SyncPage): The current page.\n selector (str): CSS selector to match elements.\n attributes (Sequence[str]): Set of attributes to retrieve for each element.\n Return:\n List[dict]: A list of dictionaries containing the retrieved elements and their attributes.\n\n\n \"\"\"\n\n elements = page.query_selector_all(selector)\n results = []\n for element in elements:\n result = {}\n for attribute in attributes:\n if attribute == \"innerText\":\n val: Optional[str] = element.inner_text()\n else:\n val = element.get_attribute(attribute)\n if val is not None and val.strip() != \"\":\n result[attribute] = val\n if result:\n results.append(result)\n return results\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the GetElements task.\n\n Args:\n input (str): Input string containing CSS selector and attributes.\n Return:\n str: The JSON-formatted string containing the retrieved elements and their attributes.\n 
Raise:\n ValueError: If the synchronous browser is not provided.\n\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n # Navigate to the desired webpage before using this tool\n results = self._get_elements(page, inputs[0], inputs[1])\n return json.dumps(results, ensure_ascii=False)\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the GetElements task.\n\n Return:\n str: A brief explanation of the GetElements task.\n\n \"\"\"\n\n return \"This task gets the elements.\"" }, { "identifier": "Navigate", "path": "tasks/playwright/navigate.py", "snippet": "class Navigate(BaseBrowser):\n \"\"\"\n **Description:**\n\n This class represents a browser navigation task to a specified URL using Playwright.\n \"\"\"\n\n name: str = \"navigate\"\n chat_name: str = \"Navigate\"\n description: str = \"Navigate a browser to the specified URL\"\n dependencies: List[str] = []\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the navigation action in the browser using Playwright.\n\n Args:\n input (str): The input string containing the URL to navigate to.\n Return:\n str: A message indicating whether the navigation was successful, including the URL and status code if successful,\n or an error message if unsuccessful.\n\n \"\"\"\n self.validate_url(inputs[0].strip())\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n response = page.goto(inputs[0])\n status = response.status if response else \"unknown\"\n return (\n f\"Navigating to {inputs[0]} returned status code {status}\"\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n This method provides an explanation of the task.\n\n Return:\n str: A brief explanation of the task, in this case, \"This task extracts all of the hyperlinks.\"\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "NavigateBack", "path": "tasks/playwright/navigate_back.py", "snippet": "class NavigateBack(BaseBrowser):\n \"\"\"\n **Description:**\n\n This class represents a browser navigation task using Playwright.\n \"\"\"\n\n name: str = \"navigate_back\"\n chat_name: str = \"NavigateBack\"\n description: str = (\n \"Navigate back to the previous page in the browser history\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n 
return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the navigation back action in the browser using Playwright.\n\n Args:\n input (str): The input string containing the URL to navigate to.\n Return:\n str: A message indicating whether the navigation was successful, including the URL and status code if successful,\n or an error message if unsuccessful.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n response = page.go_back()\n\n if response:\n return (\n f\"Navigated back to the previous page with URL '{response.url}'.\"\n f\" Status code {response.status}\"\n )\n else:\n return \"Unable to navigate back; no previous page in the history\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n This method provides an explanation of the task.\n\n Return:\n str: A brief explanation of the task, in this case, \"This task extracts all of the hyperlinks.\"\n\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "ReadDataPipe", "path": "tasks/read_from_datapipe.py", "snippet": "class ReadDataPipe(BaseTask):\n \"\"\"\n **Description:**\n\n This code reads raw data stored in datapipe. When different tasks are executed, there are situations that the final data is stored\n in the datapipe when the final called task's output_type=True. In these situations, this task is called to retireve the latest stored data\n to be used for final inference.\n \"\"\"\n\n name: str = \"read_from_datapipe\"\n chat_name: str = \"DataPipeReader\"\n description: str = (\n \"Get the stored information from datapipe to be used to answer user query accurately. \"\n \"This should be called when the final answer is in datapipe.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"the datapipe key in the format $datapipe:key$\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n This simply retrieves data from datapipe.\n\n Args:\n inputs (List[Any]): The datapipe key\n Return:\n str: The raw data along with the instructions.\n\n \"\"\"\n if len(inputs) == 0:\n return \"\"\n return (\n \"The data along with the description for each data is provided. 
\"\n \"Use the data and description to provide a detailed answer regarding the user query.\\n\\n\"\n + json.dumps(inputs[0])\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide an explanation of the task.\n\n Return:\n str: Explanation of the SerpAPI task.\n\n \"\"\"\n return \"This task is to read data from datapipe.\"" }, { "identifier": "SerpAPI", "path": "tasks/serpapi.py", "snippet": "class SerpAPI(BaseTask):\n \"\"\"\n **Description:**\n\n This code defines a class named SerpAPI, which is a specific implementation of the abstract BaseTask class.\n The SerpAPI class represents a task that utilizes the SerpAPI (Google Search API) to perform internet searches\n and retrieve relevant information.\n\n \"\"\"\n\n name: str = \"serpapi\"\n chat_name: str = \"InternetSearchSerp\"\n description: str = (\n \"A low-cost Google Search API.\"\n \"Useful for when you need to answer questions about current events.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"It should be a search query.\"]\n outputs: List[str] = []\n output_type: bool = False\n\n search_engine: Any = None #: :meta private:\n params: Dict = Field(\n default={\n \"engine\": \"google\",\n \"google_domain\": \"google.com\",\n \"gl\": \"us\",\n \"hl\": \"en\",\n }\n )\n serpapi_api_key: Optional[str] = None\n aiosession: Optional[aiohttp.ClientSession] = None\n\n @model_validator(mode=\"before\")\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"\n Validate that api key and python package exists in environment.\n\n Args:\n values (Dict): The dictionary of attribute values.\n Return:\n Dict: The updated dictionary of attribute values.\n Raise:\n ValueError: If the SerpAPI python package is not installed.\n\n \"\"\"\n\n serpapi_api_key = get_from_dict_or_env(\n values, \"serpapi_api_key\", \"SERPAPI_API_KEY\"\n )\n values[\"serpapi_api_key\"] = serpapi_api_key\n try:\n from serpapi import GoogleSearch\n\n values[\"search_engine\"] = GoogleSearch\n except ImportError:\n raise ValueError(\n \"Could not import serpapi python package. \"\n \"Please install it with `pip install google-search-results`.\"\n )\n return values\n\n def get_params(self, query: str) -> Dict[str, str]:\n \"\"\"\n Get parameters for SerpAPI.\n\n Args:\n query (str): The search query.\n Return:\n Dict[str, str]: The parameters for the SerpAPI.\n\n\n \"\"\"\n\n _params = {\n \"api_key\": self.serpapi_api_key,\n \"q\": query,\n }\n params = {**self.params, **_params}\n return params\n\n def results(self, query: str) -> Dict:\n \"\"\"\n Run query through SerpAPI and return the raw result.\n\n Args:\n query (str): The search query.\n Return:\n Dict: The raw result from the SerpAPI.\n\n\n \"\"\"\n\n params = self.get_params(query)\n search = self.search_engine(params)\n res = search.get_dict()\n return res\n\n @staticmethod\n def _process_response(res: Dict) -> str:\n \"\"\"\n Process response from SerpAPI.\n\n Args:\n res (Dict): The raw response from the SerpAPI.\n Return:\n str: Processed information from the SerpAPI response.\n\n \"\"\"\n\n try:\n if \"answer_box\" in res:\n toret = (\n \"url: \"\n + res[\"answer_box\"][\"link\"]\n + \"\\nmetadata: \"\n + res[\"answer_box\"][\"snippet\"]\n )\n else:\n toret = (\n \"url: \"\n + res[\"organic_results\"][0][\"link\"]\n + \"\\nmetadata: \"\n + res[\"organic_results\"][0][\"snippet\"]\n )\n except KeyError:\n return \"Could not get the proper response from the search. 
Try another search query.\"\n return toret\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n Run query through SerpAPI and parse result.\n\n Args:\n input (str): The input, which should be a search query.\n Return:\n str: The parsed result from the SerpAPI.\n\n\n \"\"\"\n if len(inputs) == 0:\n return \"\"\n return self._process_response(self.results(inputs[0]))\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide an explanation of the task.\n\n Return:\n str: Explanation of the SerpAPI task.\n\n \"\"\"\n\n return (\n \"This task searched in the internet using google search engine, returns the url\"\n \"and the first top result of the google search.\"\n )" }, { "identifier": "BaseTask", "path": "tasks/task.py", "snippet": "class BaseTask(BaseModel):\n \"\"\"\n **Description:**\n\n This class is the base implementation for the Tasks. For every new task that you want to create, you should\n inherit from this class and override the attributes and methods based on your task's need. This class defines a base class named BaseTask.\n This class serves as a foundation for defining common properties and behaviors among various tasks in the system.\n\n Attributes:\n name: The name of the task. It should be unique underscore_case to be defined in TaskType. sample_task_name\n chat_name: This is the name that later will be used if needed to mention the tasks inside the chat with the user.\n It should be Camel Case. SampleTaskChatName\n description: The description of the what specifically the task is doing.\n Try to define it as specific as possible to help the Task Planner decide better.\n dependencies: You can put the name of the TaskTypes that this task is dependent on. For example, in stress detection scenario,\n the stress analysis is dependent on the fetch hrv data task. [TaskType.SERPAPI, TASKTYPE.EXTRACT_TEXT]\n inputs: This is the list of descriptions for the inputs that should be provided by the planner.\n For example if your task has two inputs: [\"the first input description\", \"the second input description\"]\n outputs: This is the list of the description of the outputs that the task returns.\n This helps the planner to understand the returned results better and use it as needed.\n For example, if the task returns a list of sleep hours for different sleep states,\n the description helps planner learn which number is related to what state.\n output_type: This indicates if the task result should be stored in the DataPipe or be returned directly to the planner.\n This process will be done in the parse_input and post_execute methods. If needed you can overwrite them.\n return_direct: This indicates if this task should completely interrupt the planning process or not.\n This is needed in cases like when you want to ask a question from user and no further\n planning is needed until the user gives the proper answer (look at ask_user task)\n \"\"\"\n\n name: str\n chat_name: str\n description: str\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n datapipe: DataPipe = None\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n # False if planner should continue. True if after this task the planning should be\n # on pause or stop. 
examples are when you have a task that asks user to provide more information\n return_direct: bool = False\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def name(self):\n return self.name\n\n @property\n def dependencies(self):\n return self.dependencies\n\n @property\n def inputs(self):\n return \", \".join(\n [\n f\"{str(i)}-{input}\"\n for i, input in enumerate(self.inputs)\n ]\n )\n\n @abstractmethod\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task. You should implement this method based on your need.\n This method is called by the **execute** method that provides the parsed inputs to this method.\n\n Args:\n inputs (List[Any]): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parses the input string into a list of strings. If the input is in format `datapipe:key`,\n the parser will retrieve the data from datapipe before sending it over to the **_execute** method.\n\n Args:\n input_args (str): Input string provided by planner. It should be parsed and return a list of str variables.\n Return:\n List[str]: List of parsed strings. These strings can be converted into desired types inside **_execute** method.\n\n\n \"\"\"\n inputs = input_args.split(\",\")\n return [\n json.loads(\n self.datapipe.retrieve(\n re.search(r\"datapipe:[0-9a-f\\-]{36}\", arg)\n .group()\n .strip()\n .split(\":\")[-1]\n )\n )\n if \"datapipe\" in arg\n else arg.strip()\n for arg in inputs\n ]\n\n def _post_execute(self, result: str = \"\"):\n \"\"\"\n This method is called inside **execute** method after calling **_execute**. The result of **_execute** will be passed to this method\n in case the **output_type** attribute is True, the result will be stored inside the datapipe and the datapipe key is returned to\n the plenner instead of the raw result. This is good practice for times that you have intermediate data (like sleep data over a month)\n and it needs to be passed over to other tasks and the raw result is not immidiately needed.\n This will save a huge amount of tokens and makes sure that the planner will not pass wrong raw data to the tasks.\n\n It is important to note that to make the **DataPipe's** stored data standard and unified, we store the data in the json string\n format that currently contains 'data' and 'description' keys. The 'data' will be the returned data after execution and the 'description'\n is created using the **outputs** attribute of the task. 
Whenever the raw data is returned to the planner, these **outputs** descriptions\n will help the planner understand and learn how to interpret the 'data' to generate the final answer or continue planning.\n\n Args:\n result (str): string containig the task result.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n if self.output_type:\n key = self.datapipe.store(\n json.dumps(\n {\n \"data\": result,\n \"description\": \",\".join(self.outputs),\n }\n )\n )\n return (\n f\"The result of the tool {self.name} is stored in the datapipe with key: $datapipe:{key}$\"\n \" pass this key to other tools to access to the result or call read_from_datapipe to get the raw data.\"\n )\n return result\n\n def execute(self, input_args: str) -> str:\n \"\"\"\n This method is called by the **Orchestrator** which provides the planner provided inputs.\n This method first calls **_parse_input** to parse the inputs and retrieve needed data from the **DataPipe**\n Then **_execute** is called and the parsed inputs are given to this method. Finally the final result of execution is passed to\n **_post_execute** and ith will either be stored inside **DataPipe** or directly returned to the planner to continue planning.\n\n Args:\n input_args (str): Input string provided by planner.\n Return:\n str: The final result of the task execution.\n\n \"\"\"\n inputs = self._parse_input(input_args)\n result = self._execute(inputs)\n return self._post_execute(result)\n\n def get_dict(self) -> str:\n \"\"\"\n Generate a dictionary-like representation of the task.\n\n Return:\n str: String representation of the task dictionary.\n\n\n \"\"\"\n inputs = \",\".join(\n f\"input{i+1}-{word}\" for i, word in enumerate(self.inputs)\n )\n dependencies = \",\".join(\n f\"{i+1}-{word}\"\n for i, word in enumerate(self.dependencies)\n )\n prompt = (\n f\"tool name:{self.name}, description: {self.description}.\"\n )\n if len(self.inputs) > 0:\n prompt += f\"The input to this tool should be comma separated list of data representing: {inputs}\"\n if len(self.dependencies) > 0:\n prompt += f\"\\nThis tool is dependent on the following tools. 
make sure these tools are called first: '{dependencies}'\"\n # prompt += \"\\n\"\n return prompt\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n \"\"\"\n\n return \"\"\"\n Sample Explanation\n \"\"\"" }, { "identifier": "TaskType", "path": "tasks/task_types.py", "snippet": "class TaskType(str, Enum):\n SERPAPI = \"serpapi\"\n CLICK = \"click\"\n GET_CURRENT_PAGE = \"current_page\"\n EXTRACT_HYPERLINKS = \"extract_hyperlinks\"\n EXTRACT_TEXT = \"extract_text\"\n GET_ELEMENTS = \"get_elements\"\n NAVIGATE_BACK = \"navigate_back\"\n NAVIGATE = \"navigate\"\n AFFECT_SLEEP_GET = \"affect_sleep_get\"\n AFFECT_ACTIVITY_GET = \"affect_activity_get\"\n AFFECT_SLEEP_ANALYSIS = \"affect_sleep_analysis\"\n AFFECT_ACTIVITY_ANALYSIS = \"affect_activity_analysis\"\n GOOGLE_TRANSLATE = \"google_translate\"\n ASK_USER = \"ask_user\"\n READ_FROM_DATAPIPE = \"read_from_datapipe\"\n TEST_FILE = \"test_file\"" }, { "identifier": "TestFile", "path": "tasks/test_file.py", "snippet": "class TestFile(BaseTask):\n name: str = \"test_file\"\n chat_name: str = \"TestFile\"\n description: str = \"analyzes the image and returns description.\"\n dependencies: List[str] = []\n inputs: List[str] = [\"the image file name\"]\n outputs: List[str] = []\n output_type: bool = False\n return_direct: bool = True\n\n translator: Any = None #: :meta private:\n\n def parse_input(\n self,\n input: str,\n ) -> List[str]:\n \"\"\"\n Parse the input string into a list of strings.\n\n Args:\n input (str): Input string to be parsed.\n Return:\n List[str]: List of parsed strings.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n return input.split(\"$#\")\n\n def execute(\n self,\n input: str,\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task.\n\n Args:\n input (str): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n self.parse_input(input)\n return \"this image is a classification results of a data\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n return \"This task simply asks user to provide more information or continue interaction.\"" } ]
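The task snippets collected above all share the execution pipeline spelled out in the BaseTask docstrings: the planner's comma-separated input is parsed, any $datapipe:<key>$ reference is resolved from the datapipe, _execute runs on the parsed values, and the result is either handed straight back to the planner or, when output_type is True, stored as a JSON string ({"data": ..., "description": ...}) whose key is returned instead. The following is a minimal, self-contained sketch of that flow under stated assumptions: InMemoryDataPipe and EchoTask are illustrative stand-ins, not classes from the repository.

# Minimal sketch of the parse -> execute -> post-execute pipeline described in
# the BaseTask snippet. InMemoryDataPipe and EchoTask are illustrative
# assumptions so the example runs standalone; they are not repository classes.
import json
import re
import uuid
from typing import Any, Dict, List


class InMemoryDataPipe:
    """Toy datapipe: stores JSON strings under UUID keys."""

    def __init__(self) -> None:
        self._store: Dict[str, str] = {}

    def store(self, value: str) -> str:
        key = str(uuid.uuid4())
        self._store[key] = value
        return key

    def retrieve(self, key: str) -> str:
        return self._store[key]


class EchoTask:
    """Toy task mirroring the flow of BaseTask.execute."""

    name = "echo"
    outputs = ["the echoed text"]
    output_type = True  # True: store the result and return a datapipe key

    def __init__(self, datapipe: InMemoryDataPipe) -> None:
        self.datapipe = datapipe

    def _parse_input(self, input_args: str) -> List[Any]:
        # Resolve $datapipe:<uuid>$ references, pass other args through.
        parsed: List[Any] = []
        for arg in input_args.split(","):
            match = re.search(r"datapipe:[0-9a-f\-]{36}", arg)
            if match:
                key = match.group().split(":")[-1]
                parsed.append(json.loads(self.datapipe.retrieve(key)))
            else:
                parsed.append(arg.strip())
        return parsed

    def _execute(self, inputs: List[Any]) -> str:
        return f"echo: {inputs[0]}"

    def _post_execute(self, result: str) -> str:
        if self.output_type:
            key = self.datapipe.store(
                json.dumps({"data": result, "description": ",".join(self.outputs)})
            )
            return (
                f"The result of the tool {self.name} is stored in the datapipe "
                f"with key: $datapipe:{key}$"
            )
        return result

    def execute(self, input_args: str) -> str:
        return self._post_execute(self._execute(self._parse_input(input_args)))


if __name__ == "__main__":
    task = EchoTask(InMemoryDataPipe())
    print(task.execute("hello world"))  # returns a $datapipe:<uuid>$ reference

As the BaseTask snippet notes, returning a key instead of the raw output keeps bulky intermediate results (for example a month of sleep rows) out of the planner's prompt.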
from typing import Dict from typing import Type from tasks.affect import ActivityAnalysis from tasks.affect import ActivityGet from tasks.affect import SleepAnalysis from tasks.affect import SleepGet from tasks.ask_user import AskUser from tasks.google_translator import GoogleTranslate from tasks.playwright import Click from tasks.playwright import CurrentWebPage from tasks.playwright import ExtractHyperlinks from tasks.playwright import ExtractText from tasks.playwright import GetElements from tasks.playwright import Navigate from tasks.playwright import NavigateBack from tasks.read_from_datapipe import ReadDataPipe from tasks.serpapi import SerpAPI from tasks.task import BaseTask from tasks.task_types import TaskType from tasks.test_file import TestFile
14037
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText, TaskType.GET_ELEMENTS: GetElements, TaskType.NAVIGATE_BACK: NavigateBack, TaskType.NAVIGATE: Navigate, TaskType.AFFECT_SLEEP_GET: SleepGet, TaskType.AFFECT_ACTIVITY_GET: ActivityGet, TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis, TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis, TaskType.GOOGLE_TRANSLATE: GoogleTranslate, TaskType.ASK_USER: AskUser,
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText, TaskType.GET_ELEMENTS: GetElements, TaskType.NAVIGATE_BACK: NavigateBack, TaskType.NAVIGATE: Navigate, TaskType.AFFECT_SLEEP_GET: SleepGet, TaskType.AFFECT_ACTIVITY_GET: ActivityGet, TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis, TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis, TaskType.GOOGLE_TRANSLATE: GoogleTranslate, TaskType.ASK_USER: AskUser,
TaskType.TEST_FILE: TestFile,
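The cropped code above defines the TASK_TO_CLASS registry, and the target line extends it with the TEST_FILE entry, which matches the TaskType enum (TEST_FILE = "test_file") and the TestFile import listed earlier. As a hedged usage sketch only, a registry of this shape is normally consumed by looking up the enum member and instantiating the class; the trimmed stand-in classes and the build_task helper below are assumptions for illustration, not code from the repository.

# Hedged sketch of how a TaskType -> class registry such as TASK_TO_CLASS is
# typically consumed. The stand-in classes and build_task are illustrative
# assumptions, not repository code.
from enum import Enum
from typing import Dict, Type


class TaskType(str, Enum):  # trimmed stand-in for tasks.task_types.TaskType
    SERPAPI = "serpapi"
    TEST_FILE = "test_file"


class BaseTask:  # stand-in for tasks.task.BaseTask
    name = "base"


class SerpAPI(BaseTask):
    name = "serpapi"


class TestFile(BaseTask):
    name = "test_file"


TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = {
    TaskType.SERPAPI: SerpAPI,
    TaskType.TEST_FILE: TestFile,  # the completion target shown above
}


def build_task(task_type: TaskType) -> BaseTask:
    """Resolve the enum member to its class and instantiate it."""
    return TASK_TO_CLASS[task_type]()


print(build_task(TaskType.TEST_FILE).name)  # -> test_file

Keeping the mapping as data rather than a chain of if/elif branches is what lets a new task be added by registering a single entry, which is exactly what the completion line does.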
17
2023-12-02 05:10:44+00:00
16k
Czm369/MixPL
mmdet/models/dense_heads/atss_vlfusion_head.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "cat_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n dim: int = 0) -> Union[Tensor, BaseBoxes]:\n \"\"\"Concatenate boxes with type of tensor or box type.\n\n Args:\n data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n or box types need to be concatenated.\n dim (int): The dimension over which the box are concatenated.\n Defaults to 0.\n\n Returns:\n Union[Tensor, :obj`BaseBoxes`]: Concatenated results.\n \"\"\"\n if data_list and isinstance(data_list[0], BaseBoxes):\n return data_list[0].cat(data_list, dim=dim)\n else:\n return torch.cat(data_list, dim=dim)" }, { "identifier": "reduce_mean", "path": "mmdet/utils/dist_utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "InstanceList", "path": "mmdet/utils/typing_utils.py", "snippet": "" }, { "identifier": "filter_scores_and_topk", "path": "mmdet/models/utils/misc.py", "snippet": "def filter_scores_and_topk(scores, score_thr, topk, results=None):\n \"\"\"Filter results using score threshold and topk candidates.\n\n Args:\n scores (Tensor): The scores, shape (num_bboxes, K).\n score_thr (float): The score filter threshold.\n topk (int): The number of topk candidates.\n results (dict or list or Tensor, Optional): The results to\n which the filtering rule is to be applied. The shape\n of each item is (num_bboxes, N).\n\n Returns:\n tuple: Filtered results\n\n - scores (Tensor): The scores after being filtered, \\\n shape (num_bboxes_filtered, ).\n - labels (Tensor): The class labels, shape \\\n (num_bboxes_filtered, ).\n - anchor_idxs (Tensor): The anchor indexes, shape \\\n (num_bboxes_filtered, ).\n - filtered_results (dict or list or Tensor, Optional): \\\n The filtered results. The shape of each item is \\\n (num_bboxes_filtered, N).\n \"\"\"\n valid_mask = scores > score_thr\n scores = scores[valid_mask]\n valid_idxs = torch.nonzero(valid_mask)\n\n num_topk = min(topk, valid_idxs.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n scores, idxs = scores.sort(descending=True)\n scores = scores[:num_topk]\n topk_idxs = valid_idxs[idxs[:num_topk]]\n keep_idxs, labels = topk_idxs.unbind(dim=1)\n\n filtered_results = None\n if results is not None:\n if isinstance(results, dict):\n filtered_results = {k: v[keep_idxs] for k, v in results.items()}\n elif isinstance(results, list):\n filtered_results = [result[keep_idxs] for result in results]\n elif isinstance(results, torch.Tensor):\n filtered_results = results[keep_idxs]\n else:\n raise NotImplementedError(f'Only supports dict or list or Tensor, '\n f'but get {type(results)}.')\n return scores, labels, keep_idxs, filtered_results" }, { "identifier": "select_single_mlvl", "path": "mmdet/models/utils/misc.py", "snippet": "def select_single_mlvl(mlvl_tensors, batch_id, detach=True):\n \"\"\"Extract a multi-scale single image tensor from a multi-scale batch\n tensor based on batch index.\n\n Note: The default value of detach is True, because the proposal gradient\n needs to be detached during the training of the two-stage model. 
E.g\n Cascade Mask R-CNN.\n\n Args:\n mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,\n each is a 4D-tensor.\n batch_id (int): Batch index.\n detach (bool): Whether detach gradient. Default True.\n\n Returns:\n list[Tensor]: Multi-scale single image tensor.\n \"\"\"\n assert isinstance(mlvl_tensors, (list, tuple))\n num_levels = len(mlvl_tensors)\n\n if detach:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id].detach() for i in range(num_levels)\n ]\n else:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id] for i in range(num_levels)\n ]\n return mlvl_tensor_list" }, { "identifier": "unpack_gt_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n on ``batch_data_samples``\n\n Args:\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n tuple:\n\n - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n - batch_img_metas (list[dict]): Meta information of each image,\n e.g., image size, scaling factor, etc.\n \"\"\"\n batch_gt_instances = []\n batch_gt_instances_ignore = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n if 'ignored_instances' in data_sample:\n batch_gt_instances_ignore.append(data_sample.ignored_instances)\n else:\n batch_gt_instances_ignore.append(None)\n\n return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas" }, { "identifier": "BertEncoderLayer", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class BertEncoderLayer(BertPreTrainedModel):\n \"\"\"A modified version of the `BertLayer` class from the\n `transformers.models.bert.modeling_bert` module.\n\n Args:\n config (:class:`~transformers.BertConfig`):\n The configuration object that\n contains various parameters for the model.\n clamp_min_for_underflow (bool, optional):\n Whether to clamp the minimum value of the hidden states\n to prevent underflow. Defaults to `False`.\n clamp_max_for_overflow (bool, optional):\n Whether to clamp the maximum value of the hidden states\n to prevent overflow. 
Defaults to `False`.\n \"\"\"\n\n def __init__(self,\n config: BertConfig,\n clamp_min_for_underflow: bool = False,\n clamp_max_for_overflow: bool = False):\n super().__init__(config)\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n\n self.attention = BertAttention(config, clamp_min_for_underflow,\n clamp_max_for_overflow)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self, inputs: Dict[str, Dict[str, torch.Tensor]]\n ) -> Dict[str, Dict[str, torch.Tensor]]:\n \"\"\"Applies the BertEncoderLayer to the input features.\"\"\"\n language_dict_features = inputs['lang']\n hidden_states = language_dict_features['hidden']\n attention_mask = language_dict_features['masks']\n\n device = hidden_states.device\n input_shape = hidden_states.size()[:-1]\n extended_attention_mask = self.get_extended_attention_mask(\n attention_mask, input_shape, device)\n\n self_attention_outputs = self.attention(\n hidden_states,\n extended_attention_mask,\n None,\n output_attentions=False,\n past_key_value=None)\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:]\n layer_output = apply_chunking_to_forward(self.feed_forward_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output)\n outputs = (layer_output, ) + outputs\n hidden_states = outputs[0]\n\n language_dict_features['hidden'] = hidden_states\n\n features_dict = {\n 'visual': inputs['visual'],\n 'lang': language_dict_features\n }\n\n return features_dict\n\n def feed_forward_chunk(self, attention_output: Tensor) -> Tensor:\n \"\"\"Applies the intermediate and output layers of the BertEncoderLayer\n to a chunk of the input sequence.\"\"\"\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output" }, { "identifier": "VLFuse", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class VLFuse(nn.Module):\n \"\"\"Early Fusion Module.\n\n Args:\n v_dim (int): Dimension of visual features.\n l_dim (int): Dimension of language features.\n embed_dim (int): The embedding dimension for the attention operation.\n num_heads (int): Number of attention heads.\n dropout (float): Dropout probability.\n drop_path (float): Drop path probability.\n use_checkpoint (bool): Whether to use PyTorch's checkpoint function.\n \"\"\"\n\n def __init__(self,\n v_dim: int = 256,\n l_dim: int = 768,\n embed_dim: int = 2048,\n num_heads: int = 8,\n dropout: float = 0.1,\n drop_path: float = 0.0,\n use_checkpoint: bool = False):\n super().__init__()\n self.use_checkpoint = use_checkpoint\n self.b_attn = BiAttentionBlock(\n v_dim=v_dim,\n l_dim=l_dim,\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=dropout,\n drop_path=drop_path,\n init_values=1.0 / 6.0)\n\n def forward(self, x: dict) -> dict:\n \"\"\"Forward pass of the VLFuse module.\"\"\"\n visual_features = x['visual']\n language_dict_features = x['lang']\n\n if self.use_checkpoint:\n # vf is mean visual_features\n # checkpoint does not allow complex data structures as input,\n # such as list, so we must split them.\n vf0, vf1, vf2, vf3, vf4, language_features = checkpoint.checkpoint(\n self.b_attn, *visual_features,\n language_dict_features['hidden'],\n language_dict_features['masks'])\n else:\n vf0, vf1, vf2, vf3, vf4, language_features = self.b_attn(\n *visual_features, language_dict_features['hidden'],\n language_dict_features['masks'])\n\n 
language_dict_features['hidden'] = language_features\n fused_language_dict_features = language_dict_features\n\n features_dict = {\n 'visual': [vf0, vf1, vf2, vf3, vf4],\n 'lang': fused_language_dict_features\n }\n\n return features_dict" }, { "identifier": "permute_and_flatten", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int,\n W: int) -> Tensor:\n \"\"\"Permute and then flatten a tensor,\n\n from size (N, A, C, H, W) to (N, H * W * A, C).\n\n Args:\n layer (Tensor): Tensor of shape (N, C, H, W).\n N (int): Batch size.\n A (int): Number of attention heads.\n C (int): Number of channels.\n H (int): Height of feature map.\n W (int): Width of feature map.\n\n Returns:\n Tensor: A Tensor of shape (N, H * W * A, C).\n \"\"\"\n layer = layer.view(N, A, C, H, W)\n layer = layer.permute(0, 3, 4, 1, 2)\n layer = layer.reshape(N, -1, C)\n return layer" }, { "identifier": "MAX_CLAMP_VALUE", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "MAX_CLAMP_VALUE = 50000" }, { "identifier": "ATSSHead", "path": "mmdet/models/dense_heads/atss_head.py", "snippet": "class ATSSHead(AnchorHead):\n \"\"\"Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.\n\n ATSS head structure is similar with FCOS, however ATSS use anchor boxes\n and assign label by Adaptive Training Sample Selection instead max-iou.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n pred_kernel_size (int): Kernel size of ``nn.Conv2d``\n stacked_convs (int): Number of stacking convs of the head.\n conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n convolution layer. Defaults to None.\n norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n layer. Defaults to ``dict(type='GN', num_groups=32,\n requires_grad=True)``.\n reg_decoded_bbox (bool): If true, the regression loss would be\n applied directly on decoded bounding boxes, converting both\n the predicted boxes and regression targets to absolute\n coordinates format. Defaults to False. 
It should be `True` when\n using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.\n Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,\n loss_weight=1.0)``.\n init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n list[:obj:`ConfigDict`]): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n num_classes: int,\n in_channels: int,\n pred_kernel_size: int = 3,\n stacked_convs: int = 4,\n conv_cfg: OptConfigType = None,\n norm_cfg: ConfigType = dict(\n type='GN', num_groups=32, requires_grad=True),\n reg_decoded_bbox: bool = True,\n loss_centerness: ConfigType = dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n init_cfg: MultiConfig = dict(\n type='Normal',\n layer='Conv2d',\n std=0.01,\n override=dict(\n type='Normal',\n name='atss_cls',\n std=0.01,\n bias_prob=0.01)),\n **kwargs) -> None:\n self.pred_kernel_size = pred_kernel_size\n self.stacked_convs = stacked_convs\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n super().__init__(\n num_classes=num_classes,\n in_channels=in_channels,\n reg_decoded_bbox=reg_decoded_bbox,\n init_cfg=init_cfg,\n **kwargs)\n\n self.sampling = False\n self.loss_centerness = MODELS.build(loss_centerness)\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers of the head.\"\"\"\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually a tuple of classification scores and bbox prediction\n cls_scores (list[Tensor]): Classification scores for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * 4.\n \"\"\"\n return multi_apply(self.forward_single, x, self.scales)\n\n def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n \"\"\"Forward feature of a single scale level.\n\n Args:\n x (Tensor): Features of a single scale level.\n scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n\n Returns:\n tuple:\n cls_score (Tensor): Cls scores for a single scale level\n the channels number is num_anchors * num_classes.\n bbox_pred (Tensor): Box energies / deltas for a single scale\n level, the channels number is 
num_anchors * 4.\n centerness (Tensor): Centerness for a single scale level, the\n channel number is (N, num_anchors * 1, H, W).\n \"\"\"\n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.atss_cls(cls_feat)\n # we just follow atss, not apply exp in bbox_pred\n bbox_pred = scale(self.atss_reg(reg_feat)).float()\n centerness = self.atss_centerness(reg_feat)\n return cls_score, bbox_pred, centerness\n\n def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n bbox_pred: Tensor, centerness: Tensor,\n labels: Tensor, label_weights: Tensor,\n bbox_targets: Tensor, avg_factor: float) -> dict:\n \"\"\"Calculate the loss of a single scale level based on the features\n extracted by the detection head.\n\n Args:\n cls_score (Tensor): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W).\n bbox_pred (Tensor): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W).\n anchors (Tensor): Box reference for each scale level with shape\n (N, num_total_anchors, 4).\n labels (Tensor): Labels of each anchors with shape\n (N, num_total_anchors).\n label_weights (Tensor): Label weights of each anchor with shape\n (N, num_total_anchors)\n bbox_targets (Tensor): BBox regression targets of each anchor with\n shape (N, num_total_anchors, 4).\n avg_factor (float): Average factor that is used to average\n the loss. When using sampling method, avg_factor is usually\n the sum of positive and negative priors. When using\n `PseudoSampler`, `avg_factor` is usually equal to the number\n of positive priors.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n\n anchors = anchors.reshape(-1, 4)\n cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n -1, self.cls_out_channels).contiguous()\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n centerness = centerness.permute(0, 2, 3, 1).reshape(-1)\n bbox_targets = bbox_targets.reshape(-1, 4)\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n\n # classification loss\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=avg_factor)\n\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n bg_class_ind = self.num_classes\n pos_inds = ((labels >= 0)\n & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n if len(pos_inds) > 0:\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_bbox_pred = bbox_pred[pos_inds]\n pos_anchors = anchors[pos_inds]\n pos_centerness = centerness[pos_inds]\n\n centerness_targets = self.centerness_target(\n pos_anchors, pos_bbox_targets)\n pos_decode_bbox_pred = self.bbox_coder.decode(\n pos_anchors, pos_bbox_pred)\n\n # regression loss\n loss_bbox = self.loss_bbox(\n pos_decode_bbox_pred,\n pos_bbox_targets,\n weight=centerness_targets,\n avg_factor=1.0)\n\n # centerness loss\n loss_centerness = self.loss_centerness(\n pos_centerness, centerness_targets, avg_factor=avg_factor)\n\n else:\n loss_bbox = bbox_pred.sum() * 0\n loss_centerness = centerness.sum() * 0\n centerness_targets = bbox_targets.new_tensor(0.)\n\n return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()\n\n def loss_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n centernesses: List[Tensor],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n \"\"\"Calculate the loss based on the features extracted by 
the detection\n head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n centernesses (list[Tensor]): Centerness for each scale\n level with shape (N, num_anchors * 1, H, W)\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]\n assert len(featmap_sizes) == self.prior_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, batch_img_metas, device=device)\n\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, avg_factor) = cls_reg_targets\n avg_factor = reduce_mean(\n torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n losses_cls, losses_bbox, loss_centerness, \\\n bbox_avg_factor = multi_apply(\n self.loss_by_feat_single,\n anchor_list,\n cls_scores,\n bbox_preds,\n centernesses,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n avg_factor=avg_factor)\n\n bbox_avg_factor = sum(bbox_avg_factor)\n bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n return dict(\n loss_cls=losses_cls,\n loss_bbox=losses_bbox,\n loss_centerness=loss_centerness)\n\n def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:\n \"\"\"Calculate the centerness between anchors and gts.\n\n Only calculate pos centerness targets, otherwise there may be nan.\n\n Args:\n anchors (Tensor): Anchors with shape (N, 4), \"xyxy\" format.\n gts (Tensor): Ground truth bboxes with shape (N, 4), \"xyxy\" format.\n\n Returns:\n Tensor: Centerness between anchors and gts.\n \"\"\"\n anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n l_ = anchors_cx - gts[:, 0]\n t_ = anchors_cy - gts[:, 1]\n r_ = gts[:, 2] - anchors_cx\n b_ = gts[:, 3] - anchors_cy\n\n left_right = torch.stack([l_, r_], dim=1)\n top_bottom = torch.stack([t_, b_], dim=1)\n centerness = torch.sqrt(\n (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n assert not torch.isnan(centerness).any()\n return centerness\n\n def get_targets(self,\n anchor_list: List[List[Tensor]],\n valid_flag_list: List[List[Tensor]],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Get targets for ATSS head.\n\n This method is almost the same as `AnchorHead.get_targets()`. 
Besides\n returning the targets as the parent method does, it also returns the\n anchors as the first element of the returned tuple.\n \"\"\"\n num_imgs = len(batch_img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if batch_gt_instances_ignore is None:\n batch_gt_instances_ignore = [None] * num_imgs\n (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n all_bbox_weights, pos_inds_list, neg_inds_list,\n sampling_results_list) = multi_apply(\n self._get_targets_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore,\n unmap_outputs=unmap_outputs)\n # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n # When using sampling method, avg_factor is usually the sum of\n # positive and negative priors. When using `PseudoSampler`,\n # `avg_factor` is usually equal to the number of positive priors.\n avg_factor = sum(\n [results.avg_factor for results in sampling_results_list])\n # split targets to a list w.r.t. multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets,\n num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights,\n num_level_anchors)\n return (anchors_list, labels_list, label_weights_list,\n bbox_targets_list, bbox_weights_list, avg_factor)\n\n def _get_targets_single(self,\n flat_anchors: Tensor,\n valid_flags: Tensor,\n num_level_anchors: List[int],\n gt_instances: InstanceData,\n img_meta: dict,\n gt_instances_ignore: Optional[InstanceData] = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Compute regression, classification targets for anchors in a single\n image.\n\n Args:\n flat_anchors (Tensor): Multi-level anchors of the image, which are\n concatenated into a single tensor of shape (num_anchors ,4)\n valid_flags (Tensor): Multi level valid flags of the image,\n which are concatenated into a single tensor of\n shape (num_anchors,).\n num_level_anchors (List[int]): Number of anchors of each scale\n level.\n gt_instances (:obj:`InstanceData`): Ground truth of instance\n annotations. It usually includes ``bboxes`` and ``labels``\n attributes.\n img_meta (dict): Meta information for current image.\n gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n to be ignored during training. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n unmap_outputs (bool): Whether to map outputs back to the original\n set of anchors.\n\n Returns:\n tuple: N is the number of total anchors in the image.\n labels (Tensor): Labels of all anchors in the image with shape\n (N,).\n label_weights (Tensor): Label weights of all anchor in the\n image with shape (N,).\n bbox_targets (Tensor): BBox targets of all anchors in the\n image with shape (N, 4).\n bbox_weights (Tensor): BBox weights of all anchors in the\n image with shape (N, 4)\n pos_inds (Tensor): Indices of positive anchor with shape\n (num_pos,).\n neg_inds (Tensor): Indices of negative anchor with shape\n (num_neg,).\n sampling_result (:obj:`SamplingResult`): Sampling results.\n \"\"\"\n inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg['allowed_border'])\n if not inside_flags.any():\n raise ValueError(\n 'There is no valid anchor inside the image boundary. Please '\n 'check the image size and anchor sizes, or set '\n '``allowed_border`` to -1 to skip the condition.')\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n pred_instances = InstanceData(priors=anchors)\n assign_result = self.assigner.assign(pred_instances,\n num_level_anchors_inside,\n gt_instances, gt_instances_ignore)\n\n sampling_result = self.sampler.sample(assign_result, pred_instances,\n gt_instances)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n if self.reg_decoded_bbox:\n pos_bbox_targets = sampling_result.pos_gt_bboxes\n else:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_priors, sampling_result.pos_gt_bboxes)\n\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n\n labels[pos_inds] = sampling_result.pos_gt_labels\n if self.train_cfg['pos_weight'] <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg['pos_weight']\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = unmap(anchors, num_total_anchors, inside_flags)\n labels = unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds, sampling_result)\n\n def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n \"\"\"Get the number of valid anchors in every level.\"\"\"\n\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside" } ]
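The ATSSHead context snippet above ends with get_num_level_anchors_inside; earlier in the same snippet, centerness_target scores each positive anchor by how centered it sits inside its matched ground-truth box. A minimal sketch of that formula with made-up boxes (plain PyTorch, independent of mmdetection):

import torch

# Toy anchors and their matched ground-truth boxes, "xyxy" format (values invented).
anchors = torch.tensor([[10., 10., 30., 30.],
                        [ 0.,  0., 40., 20.]])
gts = torch.tensor([[ 8.,  8., 36., 34.],
                    [ 0.,  0., 40., 20.]])

cx = (anchors[:, 0] + anchors[:, 2]) / 2     # anchor center x
cy = (anchors[:, 1] + anchors[:, 3]) / 2     # anchor center y
l_, t_ = cx - gts[:, 0], cy - gts[:, 1]      # distances to left/top GT edges
r_, b_ = gts[:, 2] - cx, gts[:, 3] - cy      # distances to right/bottom GT edges

lr = torch.stack([l_, r_], dim=1)
tb = torch.stack([t_, b_], dim=1)
# centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))), in (0, 1]
centerness = torch.sqrt((lr.min(dim=-1).values / lr.max(dim=-1).values) *
                        (tb.min(dim=-1).values / tb.max(dim=-1).values))
print(centerness)  # the second anchor is perfectly centered -> 1.0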
import copy import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List, Optional, Sequence, Tuple, Union from mmcv.cnn import Scale from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModel from mmengine.structures import InstanceData from torch import Tensor from transformers import BertConfig from mmdet.registry import MODELS from mmdet.structures.bbox import cat_boxes from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import (BertEncoderLayer, VLFuse, filter_scores_and_topk, permute_and_flatten, select_single_mlvl, unpack_gt_instances) from ..utils.vlfuse_helper import MAX_CLAMP_VALUE from .atss_head import ATSSHead
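Among the collected imports, mmcv.cnn.Scale is the per-level learnable multiplier applied to the regression outputs in the head code (e.g. scale(self.atss_reg(reg_feat)) in the snippet above and self.scales[i](self.bbox_pred(visual)) in the file below). A minimal equivalent in spirit, shown only for illustration:

import torch
import torch.nn as nn

class ScaleSketch(nn.Module):
    """Learnable scalar multiplier, equivalent in spirit to mmcv.cnn.Scale."""

    def __init__(self, scale: float = 1.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.scale

# One learnable scale per FPN level, mirroring self.scales = nn.ModuleList([...]).
scales = nn.ModuleList([ScaleSketch(1.0) for _ in range(5)])
bbox_pred = scales[0](torch.rand(1, 4, 8, 8))   # rescaled regression deltas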
11,366
with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(bbox_preds) == len(score_factors) num_levels = len(bbox_preds) featmap_sizes = [bbox_preds[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] token_positive_maps = batch_token_positive_maps[img_id] bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) cls_logit_list = select_single_mlvl( cls_logits, img_id, detach=True) results = self._predict_by_feat_single( bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, cls_logit_list=cls_logit_list, mlvl_priors=mlvl_priors, token_positive_maps=token_positive_maps, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], cls_logit_list: List[Tensor], mlvl_priors: List[Tensor], token_positive_maps: dict, img_meta: dict, cfg: ConfigDict, rescale: bool = True, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). cls_logit_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). token_positive_maps (dict): Token positive map. img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) score_thr = cfg.get('score_thr', 0) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (bbox_pred, score_factor, cls_logit, priors) in \ enumerate(zip(bbox_pred_list, score_factor_list, cls_logit_list, mlvl_priors)): bbox_pred = bbox_pred.permute(1, 2, 0).reshape( -1, self.bbox_coder.encode_size) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() scores = convert_grounding_to_cls_scores( logits=cls_logit.sigmoid()[None], positive_maps=[token_positive_maps])[0]
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: BertConfig = None def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor: """Convert logits to class scores.""" assert len(positive_maps) == logits.shape[0] # batch size scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0])).to(logits.device) if positive_maps is not None: if all(x == positive_maps[0] for x in positive_maps): # only need to compute once positive_map = positive_maps[0] for label_j in positive_map: scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j] )].mean(-1) else: for i, positive_map in enumerate(positive_maps): for label_j in positive_map: scores[i, :, label_j - 1] = logits[ i, :, torch.LongTensor(positive_map[label_j])].mean(-1) return scores class Conv3x3Norm(nn.Module): """Conv3x3 and norm.""" def __init__(self, in_channels: int, out_channels: int, stride: int, groups: int = 1, use_dcn: bool = False, norm_type: Optional[Union[Sequence, str]] = None): super().__init__() if use_dcn: self.conv = ModulatedDeformConv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) if isinstance(norm_type, Sequence): assert len(norm_type) == 2 assert norm_type[0] == 'gn' gn_group = norm_type[1] norm_type = norm_type[0] if norm_type == 'bn': bn_op = nn.BatchNorm2d(out_channels) elif norm_type == 'gn': bn_op = nn.GroupNorm( num_groups=gn_group, num_channels=out_channels) if norm_type is not None: self.bn = bn_op else: self.bn = None def forward(self, x, **kwargs): x = self.conv(x, **kwargs) if self.bn: x = self.bn(x) return x class DyReLU(nn.Module): """Dynamic ReLU.""" def __init__(self, in_channels: int, out_channels: int, expand_ratio: int = 4): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.expand_ratio = expand_ratio self.out_channels = out_channels self.fc = nn.Sequential( nn.Linear(in_channels, in_channels // expand_ratio), nn.ReLU(inplace=True), nn.Linear(in_channels // expand_ratio, out_channels * self.expand_ratio), nn.Hardsigmoid(inplace=True)) def forward(self, x) -> Tensor: x_out = x b, c, h, w = x.size() x = self.avg_pool(x).view(b, c) x = self.fc(x).view(b, -1, 1, 1) a1, b1, a2, b2 = torch.split(x, self.out_channels, dim=1) a1 = (a1 - 0.5) * 2 + 1.0 a2 = (a2 - 0.5) * 2 b1 = b1 - 0.5 b2 = b2 - 0.5 out = torch.max(x_out * a1 + b1, x_out * a2 + b2) return out class DyConv(nn.Module): """Dynamic Convolution.""" def __init__(self, conv_func: Callable, in_channels: int, out_channels: int, use_dyfuse: bool = True, use_dyrelu: bool = False, use_dcn: bool = False): super().__init__() self.dyconvs = nn.ModuleList() self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 2)) if use_dyfuse: self.attnconv = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)) self.h_sigmoid = nn.Hardsigmoid(inplace=True) else: self.attnconv = None if use_dyrelu: self.relu = DyReLU(in_channels, out_channels) else: self.relu = nn.ReLU() if use_dcn: self.offset = nn.Conv2d( in_channels, 27, kernel_size=3, stride=1, padding=1) else: self.offset = None self.init_weights() def init_weights(self): for m in self.dyconvs.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: 
m.bias.data.zero_() if self.attnconv is not None: for m in self.attnconv.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, inputs: dict) -> dict: visual_feats = inputs['visual'] out_vis_feats = [] for level, feature in enumerate(visual_feats): offset_conv_args = {} if self.offset is not None: offset_mask = self.offset(feature) offset = offset_mask[:, :18, :, :] mask = offset_mask[:, 18:, :, :].sigmoid() offset_conv_args = dict(offset=offset, mask=mask) temp_feats = [self.dyconvs[1](feature, **offset_conv_args)] if level > 0: temp_feats.append(self.dyconvs[2](visual_feats[level - 1], **offset_conv_args)) if level < len(visual_feats) - 1: temp_feats.append( F.upsample_bilinear( self.dyconvs[0](visual_feats[level + 1], **offset_conv_args), size=[feature.size(2), feature.size(3)])) mean_feats = torch.mean( torch.stack(temp_feats), dim=0, keepdim=False) if self.attnconv is not None: attn_feat = [] res_feat = [] for feat in temp_feats: res_feat.append(feat) attn_feat.append(self.attnconv(feat)) res_feat = torch.stack(res_feat) spa_pyr_attn = self.h_sigmoid(torch.stack(attn_feat)) mean_feats = torch.mean( res_feat * spa_pyr_attn, dim=0, keepdim=False) out_vis_feats.append(mean_feats) out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) 
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits @MODELS.register_module() class ATSSVLFusionHead(ATSSHead): """ATSS head with visual-language fusion module. Args: early_fuse (bool): Whether to fuse visual and language features Defaults to False. use_checkpoint (bool): Whether to use checkpoint. Defaults to False. num_dyhead_blocks (int): Number of dynamic head blocks. Defaults to 6. lang_model_name (str): Name of the language model. Defaults to 'bert-base-uncased'. 
""" def __init__(self, *args, early_fuse: bool = False, use_checkpoint: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', init_cfg=None, **kwargs): super().__init__(*args, **kwargs, init_cfg=init_cfg) self.head = VLFusionModule( in_channels=self.in_channels, feat_channels=self.feat_channels, num_base_priors=self.num_base_priors, early_fuse=early_fuse, use_checkpoint=use_checkpoint, num_dyhead_blocks=num_dyhead_blocks, lang_model_name=lang_model_name) self.text_masks = None def _init_layers(self) -> None: """No need to initialize the ATSS head layer.""" pass def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple[Tensor]: """Forward function.""" bbox_preds, centerness, cls_logits = self.head(visual_feats, language_feats) return cls_logits, bbox_preds, centerness def loss(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples): outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(visual_feats, language_feats) self.text_masks = language_feats['masks'] loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() anchors = torch.cat(anchor_list, dim=1) labels = torch.cat(labels_list, dim=1) label_weights = torch.cat(label_weights_list, dim=1) bbox_targets = torch.cat(bbox_targets_list, dim=1) cls_scores = torch.cat(cls_scores, dim=1) centernesses_ = [] bbox_preds_ = [] for bbox_pred, centerness in zip(bbox_preds, centernesses): centernesses_.append( centerness.permute(0, 2, 3, 1).reshape(cls_scores.size(0), -1, 1)) bbox_preds_.append( bbox_pred.permute(0, 2, 3, 1).reshape(cls_scores.size(0), -1, 4)) bbox_preds = torch.cat(bbox_preds_, dim=1) centernesses = torch.cat(centernesses_, dim=1) losses_cls, losses_bbox, loss_centerness, bbox_avg_factor = \ self._loss_by_feat( anchors, cls_scores, bbox_preds, centernesses, labels, label_weights, bbox_targets, avg_factor=avg_factor) bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() losses_bbox = losses_bbox / bbox_avg_factor return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness) def _loss_by_feat(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, centerness: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, avg_factor: float) -> dict: """Calculate the loss of all scale level based on the features extracted by the detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ anchors = anchors.reshape(-1, 4) # ===== this change ===== pos_inds = (labels.sum(-1) > 0).reshape(-1) # Loss is not computed for the padded regions of the text. assert (self.text_masks.dim() == 2) text_mask = (self.text_masks > 0).unsqueeze(1) text_mask = text_mask.repeat(1, cls_score.size(1), 1) cls_score = torch.masked_select(cls_score, text_mask).contiguous() labels = torch.masked_select(labels, text_mask) label_weights = label_weights[..., None].repeat(1, 1, text_mask.size(-1)) label_weights = torch.masked_select(label_weights, text_mask) bbox_pred = bbox_pred.reshape(-1, 4) centerness = centerness.reshape(-1) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # classification loss loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) if pos_inds.sum() > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_centerness = centerness[pos_inds] centerness_targets = self.centerness_target( pos_anchors, pos_bbox_targets) if torch.isnan(centerness_targets).any(): print('=====Centerness includes NaN=====') mask = ~torch.isnan(centerness_targets) centerness_targets = centerness_targets[mask] pos_centerness = pos_centerness[mask] pos_anchors = pos_anchors[mask] pos_bbox_targets = pos_bbox_targets[mask] pos_bbox_pred = pos_bbox_pred[mask] if pos_bbox_targets.shape[0] == 0: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) 
return loss_cls, loss_bbox, loss_centerness, \ centerness_targets.sum() # The decoding process takes the offset into consideration. pos_anchors[:, 2:] += 1 pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_bbox_targets, weight=centerness_targets, avg_factor=1.0) # centerness loss loss_centerness = self.loss_centerness( pos_centerness, centerness_targets, avg_factor=avg_factor) else: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors (List[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). sampling_result (:obj:`SamplingResult`): Sampling results. 
""" anchors = flat_anchors # Align the official implementation anchors[:, 2:] -= 1 num_level_anchors_inside = num_level_anchors pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign(pred_instances, num_level_anchors_inside, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) # ===== this change ===== labels = anchors.new_full((num_valid_anchors, self.feat_channels), 0, dtype=torch.float32) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if self.reg_decoded_bbox: pos_bbox_targets = sampling_result.pos_gt_bboxes else: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_priors, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 # ===== this change ===== labels[pos_inds] = gt_instances.positive_maps[ sampling_result.pos_assigned_gt_inds] if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor: """Calculate the centerness between anchors and gts. Only calculate pos centerness targets, otherwise there may be nan. Args: anchors (Tensor): Anchors with shape (N, 4), "xyxy" format. gts (Tensor): Ground truth bboxes with shape (N, 4), "xyxy" format. Returns: Tensor: Centerness between anchors and gts. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 l_ = anchors_cx - gts[:, 0] t_ = anchors_cy - gts[:, 1] r_ = gts[:, 2] - anchors_cx b_ = gts[:, 3] - anchors_cy left_right = torch.stack([l_, r_], dim=1) top_bottom = torch.stack([t_, b_], dim=1) centerness = torch.sqrt( (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) # assert not torch.isnan(centerness).any() return centerness def predict(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples, rescale: bool = True): """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Args: visual_feats (tuple[Tensor]): Multi-level visual features from the upstream network, each is a 4D-tensor. language_feats (dict): Language features from the upstream network. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] batch_token_positive_maps = [ data_samples.token_positive_map for data_samples in batch_data_samples ] outs = self(visual_feats, language_feats) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, batch_token_positive_maps=batch_token_positive_maps, rescale=rescale) return predictions def predict_by_feat(self, cls_logits: List[Tensor], bbox_preds: List[Tensor], score_factors: List[Tensor], batch_img_metas: Optional[List[dict]] = None, batch_token_positive_maps: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_logits (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. batch_token_positive_maps (list[dict], Optional): Batch token positive map. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(bbox_preds) == len(score_factors) num_levels = len(bbox_preds) featmap_sizes = [bbox_preds[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] token_positive_maps = batch_token_positive_maps[img_id] bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) cls_logit_list = select_single_mlvl( cls_logits, img_id, detach=True) results = self._predict_by_feat_single( bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, cls_logit_list=cls_logit_list, mlvl_priors=mlvl_priors, token_positive_maps=token_positive_maps, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], cls_logit_list: List[Tensor], mlvl_priors: List[Tensor], token_positive_maps: dict, img_meta: dict, cfg: ConfigDict, rescale: bool = True, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. 
Args: bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). cls_logit_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). token_positive_maps (dict): Token positive map. img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) score_thr = cfg.get('score_thr', 0) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (bbox_pred, score_factor, cls_logit, priors) in \ enumerate(zip(bbox_pred_list, score_factor_list, cls_logit_list, mlvl_priors)): bbox_pred = bbox_pred.permute(1, 2, 0).reshape( -1, self.bbox_coder.encode_size) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() scores = convert_grounding_to_cls_scores( logits=cls_logit.sigmoid()[None], positive_maps=[token_positive_maps])[0]
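The full file above defines convert_grounding_to_cls_scores, which turns token-level grounding logits into per-class scores by averaging the logits of each class's grounding tokens. A toy usage with invented class/token indices (only the shapes matter here):

import torch

# Pretend grounding logits over 5 prompt tokens for 3 priors (batch size 1).
logits = torch.rand(1, 3, 5)

# Hypothetical positive map: class 1 is grounded by tokens 0-1,
# class 2 by tokens 3-4 (1-based class ids, as in the function above).
positive_maps = [{1: [0, 1], 2: [3, 4]}]

num_classes = len(positive_maps[0])
scores = torch.zeros(logits.shape[0], logits.shape[1], num_classes)
for label_j, token_ids in positive_maps[0].items():
    # class score = mean of the logits of its grounding tokens
    scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(token_ids)].mean(-1)
print(scores.shape)  # torch.Size([1, 3, 2])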
results = filter_scores_and_topk(
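The gold next line calls filter_scores_and_topk, imported from ..utils in the file above. Its exact signature is not shown in this example; purely as an illustration, a generic threshold-then-top-k selection could look like this:

import torch

def filter_scores_and_topk_sketch(scores, score_thr, topk):
    # Keep entries above the threshold, then retain at most `topk` of them.
    # Returns the kept scores, their class labels and their prior indices.
    valid = scores > score_thr
    flat_scores = scores[valid]            # row-major order of kept entries
    flat_idxs = valid.nonzero()            # (K, 2): [prior index, class index]
    num_topk = min(topk, flat_scores.numel())
    flat_scores, rank = flat_scores.sort(descending=True)
    flat_scores = flat_scores[:num_topk]
    keep = flat_idxs[rank[:num_topk]]
    return flat_scores, keep[:, 1], keep[:, 0]

scores = torch.rand(10, 3)                 # 10 priors x 3 classes
top_scores, labels, keep_idxs = filter_scores_and_topk_sketch(scores, 0.5, 5)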
4
2023-11-30 08:58:00+00:00
16k
SEU-ProactiveSecurity-Group/MalPurifier
examples/md_nn_test.py
[ { "identifier": "Dataset", "path": "core/defense/dataset.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n def __init__(self, seed=0, device='cuda', feature_ext_args=None):\n \"\"\"\n 为机器学习模型学习构建数据集。\n \n :param seed: 随机种子\n :param device: 设备类型,'cuda' 或 'cpu'\n :param feature_ext_args: 提取特征的参数\n \"\"\"\n \n # 设置随机种子,并确保随机性在不同库之间是一致的\n self.seed = seed\n random.seed(self.seed)\n np.random.seed(self.seed)\n torch.manual_seed(self.seed)\n \n # 设置PyTorch的默认数据类型为float32\n torch.set_default_dtype(torch.float32)\n \n # 初始化简化类的临时数据存储\n self.temp_data = utils.SimplifyClass(Manager())\n \n # 设定使用的设备\n self.device = device\n\n # 根据提供的参数初始化特征提取器\n self.feature_ext_args = feature_ext_args\n if feature_ext_args is None:\n self.feature_extractor = Apk2features(config.get('metadata', 'naive_data_pool'),\n config.get('dataset', 'intermediate'))\n else:\n assert isinstance(feature_ext_args, dict)\n self.feature_extractor = Apk2features(config.get('metadata', 'naive_data_pool'),\n config.get('dataset', 'intermediate'),\n **feature_ext_args)\n\n # 分割数据集为训练、验证和测试集\n data_saving_path = os.path.join(config.get('dataset', 'intermediate'), 'dataset.idx')\n \n # 检查是否已保存了分割数据,且不需要更新\n if os.path.exists(data_saving_path) and (not self.feature_extractor.update):\n (self.train_dataset, self.validation_dataset, self.test_dataset) = utils.read_pickle(data_saving_path)\n\n # # 计算良性和恶意apk的数量\n # benign_train = np.sum(self.train_dataset[1] == 0)\n # malicious_train = np.sum(self.train_dataset[1] == 1)\n\n # benign_val = np.sum(self.validation_dataset[1] == 0)\n # malicious_val = np.sum(self.validation_dataset[1] == 1)\n\n # benign_test = np.sum(self.test_dataset[1] == 0)\n # malicious_test = np.sum(self.test_dataset[1] == 1)\n\n # # 打印数据量\n # total_data = len(self.train_dataset[0]) + len(self.validation_dataset[0]) + len(self.test_dataset[0])\n # print(f\"总数据量: {total_data}\")\n # print(f\"训练数据量: {len(self.train_dataset[0])} (良性: {benign_train}, 恶意: {malicious_train})\")\n # print(f\"验证数据量: {len(self.validation_dataset[0])} (良性: {benign_val}, 恶意: {malicious_val})\")\n # print(f\"测试数据量: {len(self.test_dataset[0])} (良性: {benign_test}, 恶意: {malicious_test})\")\n\n # 更新数据路径\n def path_tran(data_paths):\n return np.array(\n [os.path.join(config.get('metadata', 'naive_data_pool'),\n os.path.splitext(os.path.basename(name))[0] + self.feature_extractor.file_ext) for \n name in data_paths])\n\n self.train_dataset = (path_tran(self.train_dataset[0]), self.train_dataset[1])\n self.validation_dataset = (path_tran(self.validation_dataset[0]), self.validation_dataset[1])\n self.test_dataset = (path_tran(self.test_dataset[0]), self.test_dataset[1])\n else:\n # 预处理恶意软件和良性软件的APK文件,并获取其特征路径\n mal_feature_paths = self.apk_preprocess(config.get('dataset', 'malware_dir'))\n ben_feature_paths = self.apk_preprocess(config.get('dataset', 'benware_dir'))\n feature_paths = mal_feature_paths + ben_feature_paths\n \n # 根据恶意软件和良性软件的数量生成标签\n gt_labels = np.zeros((len(mal_feature_paths) + len(ben_feature_paths)), dtype=np.int32)\n gt_labels[:len(mal_feature_paths)] = 1\n \n # 根据特征路径和标签分割数据\n self.train_dataset, self.validation_dataset, self.test_dataset = self.data_split(feature_paths, gt_labels)\n \n # 保存分割后的数据\n utils.dump_pickle((self.train_dataset, self.validation_dataset, self.test_dataset), data_saving_path)\n\n # 获取特征词汇表和大小\n self.vocab, _1, _2 = self.feature_extractor.get_vocab(*self.train_dataset)\n self.vocab_size = len(self.vocab)\n \n # 获取非API的数量\n self.non_api_size = self.feature_extractor.get_non_api_size(self.vocab)\n \n # 
获取类别数量\n self.n_classes = np.unique(self.train_dataset[1]).size\n\n\n def data_split(self, feature_paths, labels):\n \"\"\"\n 将数据分为训练、验证和测试集。\n\n :param feature_paths: 特征文件的路径列表。\n :param labels: 对应的标签列表。\n :return: (训练数据, 训练标签), (验证数据, 验证标签), (测试数据, 测试标签)\n \"\"\"\n \n # 确保特征文件路径数量与标签数量相同\n assert len(feature_paths) == len(labels)\n \n # 初始化训练、验证和测试集的文件名列表为None\n train_dn, validation_dn, test_dn = None, None, None\n \n # 定义数据集切分文件的路径\n data_split_path = os.path.join(config.get('dataset', 'dataset_dir'), 'tr_te_va_split.name')\n \n # 检查数据切分文件是否存在\n if os.path.exists(data_split_path):\n train_dn, val_dn, test_dn = utils.read_pickle(data_split_path)\n\n # 如果任何文件名列表为空\n if (train_dn is None) or (validation_dn is None) or (test_dn is None):\n # 从特征文件路径中提取文件名\n data_names = [os.path.splitext(os.path.basename(path))[0] for path in feature_paths]\n \n # 分割数据为训练和测试集,20%为测试集\n train_dn, test_dn = train_test_split(data_names, test_size=0.2, random_state=self.seed, shuffle=True)\n \n # 从训练集中进一步分割出验证集,25%为验证集\n train_dn, validation_dn = train_test_split(train_dn, test_size=0.25, random_state=self.seed, shuffle=True)\n \n # 将切分结果保存为pickle文件\n utils.dump_pickle((train_dn, validation_dn, test_dn), path=data_split_path)\n\n # 根据提供的文件名列表查询路径\n def query_path(_data_names):\n return np.array(\n [path for path in feature_paths if os.path.splitext(os.path.basename(path))[0] in _data_names])\n\n # 根据提供的文件名列表查询对应的指示器(布尔列表)\n def query_indicator(_data_names):\n return [True if os.path.splitext(os.path.basename(path))[0] in _data_names else False for path in\n feature_paths]\n\n # 查询训练、验证和测试数据的路径\n train_data = query_path(train_dn)\n val_data = query_path(validation_dn)\n test_data = query_path(test_dn)\n \n # 为确保数据与标签一致,随机打乱训练数据和标签\n random.seed(self.seed)\n random.shuffle(train_data)\n train_y = labels[query_indicator(train_dn)]\n random.seed(self.seed)\n random.shuffle(train_y)\n \n # 查询训练、验证和测试数据的标签\n val_y = labels[query_indicator(validation_dn)]\n test_y = labels[query_indicator(test_dn)]\n \n # 返回切分的数据和标签\n return (train_data, train_y), (val_data, val_y), (test_data, test_y)\n\n\n def apk_preprocess(self, apk_paths, labels=None, update_feature_extraction=False):\n \"\"\"\n APK 文件的预处理。\n \n :param apk_paths: APK文件路径列表。\n :param labels: APK文件对应的标签列表,可以为None。\n :param update_feature_extraction: 是否更新特征提取器的状态。\n :return: 处理后的特征路径,和可选的标签。\n \"\"\"\n \n # 保存特征提取器的当前更新状态\n old_status = self.feature_extractor.update\n \n # 将特征提取器的更新状态设置为提供的参数值\n self.feature_extractor.update = update_feature_extraction\n \n # 如果没有提供标签\n if labels is None:\n # 使用特征提取器从apk_paths中提取特征\n feature_paths = self.feature_extractor.feature_extraction(apk_paths)\n \n # 恢复特征提取器的原始状态\n self.feature_extractor.update = old_status\n \n # 返回特征路径\n return feature_paths\n else:\n # 确保apk文件的数量与标签的数量相匹配\n assert len(apk_paths) == len(labels), \\\n '不匹配的数据形状 {} vs. 
{}'.format(len(apk_paths), len(labels))\n \n # 使用特征提取器从apk_paths中提取特征\n feature_paths = self.feature_extractor.feature_extraction(apk_paths)\n \n labels_ = []\n for i, feature_path in enumerate(feature_paths):\n # 获取不带扩展名的文件名\n fname = os.path.splitext(os.path.basename(feature_path))[0]\n \n # 确保当前文件名在对应的apk路径中\n if fname in apk_paths[i]:\n # 添加对应的标签到labels_列表中\n labels_.append(labels[i])\n \n # 恢复特征提取器的原始状态\n self.feature_extractor.update = old_status\n \n # 返回特征路径和对应的标签\n return feature_paths, np.array(labels_)\n\n\n def feature_preprocess(self, feature_paths):\n raise NotImplementedError\n # self.feature_extractor.update_cg(feature_paths)\n\n\n def feature_api_rpst_sum(self, api_feat_representation_list):\n \"\"\"\n 对API表示进行求和\n :param api_feat_representation_list: 一个稀疏矩阵列表\n \"\"\"\n \n # 确保输入是一个列表\n assert isinstance(api_feat_representation_list, list), \"期望输入是一个列表。\"\n \n # 如果列表不为空\n if len(api_feat_representation_list) > 0:\n # 确保列表中的第一个元素是 csr_matrix 类型的稀疏矩阵\n assert isinstance(api_feat_representation_list[0], csr_matrix)\n else:\n # 如果列表为空,则返回一个全为0的矩阵\n return np.zeros(shape=(self.vocab_size - self.non_api_size, self.vocab_size - self.non_api_size),\n dtype=np.float)\n \n # 将第一个稀疏矩阵转为密集型矩阵,并转换为浮点类型\n adj_array = np.asarray(api_feat_representation_list[0].todense()).astype(np.float32)\n \n # 遍历列表中的其余稀疏矩阵\n for sparse_mat in api_feat_representation_list[1:]:\n # 将稀疏矩阵转为密集型矩阵,转换为浮点类型,并与之前的结果进行相加\n adj_array += np.asarray(sparse_mat.todense()).astype(np.float32)\n \n # 将最终结果中的所有值限制在[0,1]之间\n return np.clip(adj_array, a_min=0, a_max=1)\n\n\n def get_numerical_input(self, feature_path, label):\n \"\"\"\n loading features for given a feature path\n # results:\n # --->> mapping feature path to numerical representations\n # --->> features: 1d array, and a list of sparse matrices\n # --->> label: scalar\n \"\"\"\n feature_vector, label = self.feature_extractor.feature2ipt(feature_path, label,\n self.vocab,\n None)\n return feature_vector, label\n\n\n def get_input_producer(self, feature_paths, y, batch_size, name='train', use_cache=False):\n \"\"\"\n 获取输入生产器,返回一个 DataLoader 对象。\n \n :param feature_paths: 特征路径列表。\n :param y: 标签。\n :param batch_size: 每个批次的数据数量。\n :param name: 使用场景名称,默认为'train'。\n :param use_cache: 是否使用缓存,默认为False。\n :return: 返回一个 DataLoader 对象。\n \"\"\"\n \n # 定义 DataLoader 的参数\n params = {\n 'batch_size': batch_size,\n 'num_workers': self.feature_ext_args['proc_number'],\n 'shuffle': False\n }\n \n # 如果是训练过程,则使用用户设定的缓存值;否则,不使用缓存\n use_cache = use_cache if name == 'train' else False\n \n # 创建 DataLoader,它会使用自定义的 DatasetTorch 数据集对象\n # worker_init_fn 参数用于为每个工作线程设定一个随机种子,确保数据的打乱是随机的\n return torch.utils.data.DataLoader(\n DatasetTorch(feature_paths, y, self, name=name, use_cache=use_cache),\n worker_init_fn=lambda x: np.random.seed(torch.randint(0, 2**31, [1,])[0] + x),\n **params\n )\n\n\n def clear_up(self):\n self.temp_data.reset()\n\n @staticmethod\n def get_modification(adv_x, x, idx, sp=True):\n # 确认adv_x和x是numpy.ndarray类型或torch.Tensor类型的实例\n assert isinstance(adv_x, (np.ndarray, torch.Tensor))\n assert isinstance(x, (np.ndarray, torch.Tensor))\n \n # 计算对抗样本和原始样本之间的差异\n x_mod = adv_x - x\n \n # 根据索引idx选择对应的元素\n if isinstance(x_mod, np.ndarray):\n x_mod = np.array([x_mod[i, idx[i]] for i in range(x.shape[0])])\n else:\n x_mod = torch.stack([x_mod[i, idx[i]] for i in range(x.shape[0])])\n \n # 判断是否需要转为稀疏表示\n if sp:\n # 如果x_mod是torch.Tensor,那么将其转换为稀疏表示并移到cpu上\n # 如果x_mod是numpy.ndarray,那么先将其转换为torch.Tensor,然后转换为稀疏表示并移到cpu上\n if isinstance(x_mod, torch.Tensor):\n return 
x_mod.to_sparse().cpu().unbind(dim=0)\n else:\n return torch.tensor(x_mod, dtype=torch.int).to_sparse().cpu().unbind(dim=0)\n else:\n # 如果不需要转为稀疏表示,那么直接将其移到cpu上或者分割为numpy数组\n if isinstance(x_mod, torch.Tensor):\n return x_mod.cpu().unbind(dim=0)\n else:\n return np.split(x_mod, x_mod.shape[0], axis=0)\n\n\n @staticmethod\n def modification_integ(x_mod_integrated, x_mod):\n # 确认x_mod_integrated和x_mod是列表类型的实例\n assert isinstance(x_mod_integrated, list) and isinstance(x_mod, list)\n \n # 如果x_mod_integrated为空列表,则返回x_mod\n if len(x_mod_integrated) == 0:\n return x_mod\n \n # 确认x_mod_integrated和x_mod的长度相同\n assert len(x_mod_integrated) == len(x_mod)\n \n # 遍历x_mod和x_mod_integrated中的每个元素\n for i in range(len(x_mod)):\n # 确认当前x_mod中的元素不在GPU上,\n # 因为在GPU上的Tensor进行list相加操作的时候是列表拼接,而在CPU上则是张量之间的加法\n assert not x_mod[i].is_cuda\n \n # 更新x_mod_integrated中的元素\n x_mod_integrated[i] += x_mod[i]\n \n # 返回更新后的x_mod_integrated\n return x_mod_integrated" }, { "identifier": "MalwareDetectionDNN", "path": "core/defense/md_dnn.py", "snippet": "class MalwareDetectionDNN(nn.Module):\n def __init__(self, input_size, n_classes, device='cpu', name='DNN', **kwargs):\n \"\"\"\n 初始化恶意软件检测器\n\n 参数:\n ----------\n @param input_size: 整数,输入向量的维度数量。\n @param n_classes: 整数,表示分类的数量,例如二分类问题中n=2。\n @param device: 字符串,可以是'cpu'或'cuda',表示模型应该在CPU还是GPU上运行。\n @param name: 字符串,用于命名模型。\n \"\"\"\n super(MalwareDetectionDNN, self).__init__() # 调用父类初始化\n self.input_size = input_size # 定义输入尺寸\n self.n_classes = n_classes # 定义分类数量\n self.device = device # 定义运行设备\n self.name = name # 定义模型名称\n\n self.parse_args(**kwargs) # 解析额外参数\n\n self.dense_layers = [] # 初始化一个空的密集层列表\n \n # 检查是否至少有一个隐藏层\n if len(self.dense_hidden_units) >= 1:\n # 添加第一个密集层\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[0]))\n else:\n # 如果没有隐藏层,抛出异常\n raise ValueError(\"Expect at least one hidden layer.\")\n\n # 为每一对连续的隐藏单元添加一个密集层\n for i in range(len(self.dense_hidden_units[0:-1])):\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[i], \n self.dense_hidden_units[i + 1]))\n \n # 添加最后一个连接到输出层的密集层\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[-1], self.n_classes))\n \n # 将密集层添加到模型中以进行跟踪\n for idx_i, dense_layer in enumerate(self.dense_layers):\n self.add_module('nn_model_layer_{}'.format(idx_i), dense_layer)\n\n # 根据参数选择使用SELU或ReLU激活函数\n if self.smooth:\n self.activation_func = F.selu # 使用SELU激活函数\n else:\n self.activation_func = F.relu # 使用ReLU激活函数\n\n # 定义模型的保存路径\n self.model_save_path = path.join(config.get('experiments', 'md_dnn') + '_' + self.name,\n 'model.pth')\n \n # 日志中打印模型的结构信息\n logger.info('========================================dnn model architecture===============================')\n logger.info(self)\n logger.info('===============================================end==========================================')\n\n\n def parse_args(self,\n dense_hidden_units=None,\n dropout=0.6,\n alpha_=0.2,\n smooth=False,\n **kwargs\n ):\n \"\"\"\n 解析并设置网络的超参数。\n\n 参数:\n ----------\n dense_hidden_units : list, 可选\n 网络中每个隐藏层的单元数。如果没有指定,则默认为两个隐藏层,每层200个单元。\n dropout : float, 可选\n dropout正则化的比率,默认为0.6。\n alpha_ : float, 可选\n 某些激活函数的参数,默认为0.2。\n smooth : bool, 可选\n 是否使用平滑的激活函数,默认为False。\n **kwargs : dict\n 其他超参数。\n \"\"\"\n\n # 如果用户没有指定隐藏层,使用默认的配置\n if dense_hidden_units is None:\n self.dense_hidden_units = [200, 200]\n # 如果用户指定了一个列表,使用它\n elif isinstance(dense_hidden_units, list):\n self.dense_hidden_units = dense_hidden_units\n # 否则抛出一个异常\n else:\n raise TypeError(\"Expect a list of hidden units.\")\n\n # 设置dropout, 
alpha和smooth参数\n self.dropout = dropout\n self.alpha_ = alpha_\n self.smooth = smooth\n\n # 从kwargs中获取并设置proc_number\n self.proc_number = kwargs.get('proc_number', None) # 如果不存在,则返回None\n\n # 如果还有其他参数,记录警告,因为这些参数可能是未知的\n if len(kwargs) > 0:\n logger.warning(\"Unknown hyper-parameters {}\".format(str(kwargs)))\n\n\n def forward(self, x):\n \"\"\"\n 使输入数据 x 通过神经网络\n \n 参数\n ----------\n @param x: 2D张量,特征表示\n \"\"\"\n # 遍历神经网络的每一层,除了最后一层\n for dense_layer in self.dense_layers[:-1]:\n x = self.activation_func(dense_layer(x)) # 使用激活函数处理每一层的输出\n\n # 对处理过的数据进行 dropout 操作,用于防止过拟合\n latent_representation = F.dropout(x, self.dropout, training=self.training)\n \n # 用最后一层进行处理,得到logits(未归一化的预测或分类得分)\n logits = self.dense_layers[-1](latent_representation)\n return logits\n\n def inference(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n\n # 将所有批次的置信度垂直堆叠成一个张量\n confidences = torch.vstack(confidences)\n # 将所有批次的真实标签连接成一个张量\n gt_labels = torch.cat(gt_labels, dim=0)\n \n return confidences, gt_labels\n\n def inference_dae(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n \n return confidences, gt_labels\n\n\n def get_important_attributes(self, test_data_producer, target_label=1):\n \"\"\"\n 使用集成梯度(Integrated Gradients)方法获取重要的属性/特征\n\n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n @param target_label: 目标标签,默认为1\n \n 返回值\n ----------\n 返回重要的属性/特征\n \"\"\"\n attributions = [] # 存储属性或特征的重要性得分\n gt_labels = [] # 存储真实标签\n\n # 定义一个使用集成梯度方法的包装器\n def _ig_wrapper(_x):\n logits = self.forward(_x)\n return F.softmax(logits, dim=-1)\n\n # 初始化集成梯度对象\n ig = IntegratedGradients(_ig_wrapper)\n\n # 遍历测试数据集\n for i, (x, y) in enumerate(test_data_producer):\n # 将数据和标签转移到指定的设备上\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 使x能够计算梯度\n x.requires_grad = True\n # 定义基线,用于集成梯度的计算\n baseline = torch.zeros_like(x, dtype=torch.double, device=self.device)\n # 计算属性的重要性\n attribution_bs = ig.attribute(x,\n baselines=baseline,\n target=target_label)\n # 将所有批次的属性垂直堆叠\n attribution = torch.hstack(attribution_bs)\n # 保存得到的属性重要性得分和真实标签\n attributions.append(attribution.clone().detach().cpu().numpy())\n gt_labels.append(y.clone().detach().cpu().numpy())\n # 将真实标签保存为.npy文件\n np.save('./labels', np.concatenate(gt_labels))\n \n return np.vstack(attributions)\n\n\n def 
inference_batch_wise(self, x):\n \"\"\"\n 仅支持恶意软件样本的批量推理\n \n 参数\n ----------\n @param x: 输入数据的张量\n \n 返回值\n ----------\n 返回推理的置信度和标签\n \"\"\"\n # 确保输入是一个张量\n assert isinstance(x, torch.Tensor)\n \n # 获得模型的输出\n logit = self.forward(x)\n \n # 返回每个样本的置信度和一个与logit形状相同的全1数组(表示恶意软件样本)\n return torch.softmax(logit, dim=-1).detach().cpu().numpy(), np.ones((logit.size()[0],))\n\n\n def predict(self, test_data_producer, indicator_masking=True):\n \"\"\"\n 预测标签并进行评估\n\n 参数\n --------\n @param test_data_producer: torch.DataLoader, 用于生成测试数据的数据加载器\n \"\"\"\n # 进行评估\n confidence, y_true = self.inference(test_data_producer)\n y_pred = confidence.argmax(1).cpu().numpy() # 预测标签\n y_true = y_true.cpu().numpy() # 真实标签\n \n # print(\"y_true.shape:\", y_true.shape)\n # print(\"y_pred.shape:\", y_pred.shape)\n \n # 使用sklearn的评估指标进行评估\n from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score\n accuracy = accuracy_score(y_true, y_pred)\n b_accuracy = balanced_accuracy_score(y_true, y_pred)\n \n MSG = \"The accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(accuracy * 100))\n \n MSG = \"The balanced accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(b_accuracy * 100))\n\n # 检查数据中是否存在缺失的类别\n if np.any([np.all(y_true == i) for i in range(self.n_classes)]):\n logger.warning(\"class absent.\")\n return\n\n # 计算混淆矩阵\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n fpr = fp / float(tn + fp) # 计算假阳性率\n fnr = fn / float(tp + fn) # 计算假阴性率\n f1 = f1_score(y_true, y_pred, average='binary') # 计算F1分数\n\n print(\"Other evaluation metrics we may need:\")\n MSG = \"False Negative Rate (FNR) is {:.5f}%、False Positive Rate (FPR) is {:.5f}%, F1 score is {:.5f}%\"\n logger.info(MSG.format(fnr * 100, fpr * 100, f1 * 100))\n\n\n def customize_loss(self, logits, gt_labels, representation=None, mini_batch_idx=None):\n \"\"\"\n 自定义损失函数\n\n 参数\n --------\n @param logits: Tensor, 模型的输出\n @param gt_labels: Tensor, 真实的标签\n @param representation: Tensor, 可选参数,表示特征表示\n @param mini_batch_idx: Int, 可选参数,表示小批次的索引\n \n 返回值\n --------\n 返回交叉熵损失\n \"\"\"\n return F.cross_entropy(logits, gt_labels)\n\n\n def fit(self, train_data_producer, validation_data_producer, epochs=100, lr=0.005, weight_decay=0., weight_sampling=0.5, verbose=True):\n \"\"\"\n 训练恶意软件检测器,根据验证集上的交叉熵损失选择最佳模型。\n\n 参数\n ----------\n @param train_data_producer: 对象, 用于生成一批训练数据的迭代器\n @param validation_data_producer: 对象, 用于生成验证数据的迭代器\n @param epochs: 整数, 训练的周期数\n @param lr: 浮点数, Adam优化器的学习率\n @param weight_decay: 浮点数, 惩罚因子\n @param verbose: 布尔值, 是否显示详细的日志\n \"\"\"\n # 初始化优化器\n optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)\n best_avg_acc = 0. # 记录验证集上的最佳准确率\n best_epoch = 0 # 记录最佳准确率对应的周期\n total_time = 0. 
# 总的训练时间\n\n # 获取训练数据批次的数量\n nbatches = len(train_data_producer)\n \n # 进行指定次数的训练周期\n for i in range(epochs):\n # 设置模型为训练模式\n self.train()\n # 初始化列表用于保存每批数据的损失值和准确率\n losses, accuracies = [], []\n\n # 对每个训练数据批次进行遍历\n for idx_batch, (x_train, y_train) in enumerate(train_data_producer):\n # 将数据转移到指定的计算设备(例如GPU或CPU)\n x_train, y_train = utils.to_device(x_train.double(), y_train.long(), self.device)\n\n # 记录开始训练的时间\n start_time = time.time()\n\n # 清空之前累积的梯度\n optimizer.zero_grad() \n \n # 对输入数据进行前向传播\n logits = self.forward(x_train) \n \n # 根据模型的输出和真实标签计算损失\n loss_train = self.customize_loss(logits, y_train) \n\n # 对损失进行反向传播\n loss_train.backward()\n \n # 使用优化器更新模型参数\n optimizer.step()\n\n # 计算训练这批数据所花费的总时间\n total_time += time.time() - start_time\n \n # 计算这批数据上的准确率\n acc_train = (logits.argmax(1) == y_train).sum().item() / x_train.size()[0]\n \n # 将时间转换为分钟和秒\n mins, secs = int(total_time / 60), int(total_time % 60)\n \n # 将这批数据的损失和准确率加入到列表中\n losses.append(loss_train.item())\n accuracies.append(acc_train)\n\n # 如果开启了详细输出模式,显示当前训练进度和这批数据上的损失和准确率\n if verbose:\n logger.info(f'小批次: {i * nbatches + idx_batch + 1}/{epochs * nbatches} | 训练时间为 {mins:.0f} 分钟, {secs} 秒。')\n logger.info(f'训练损失(小批次级别): {losses[-1]:.4f} | 训练精度: {acc_train * 100:.2f}')\n\n\n self.eval() # 将模型设置为评估模式\n avg_acc_val = []\n\n with torch.no_grad(): # 确保在评估模式下不进行梯度的计算\n for x_val, y_val in validation_data_producer:\n # 将数据移动到指定设备(例如GPU或CPU)上,并确保数据的类型为双精度浮点数和长整型\n x_val, y_val = utils.to_device(x_val.double(), y_val.long(), self.device)\n \n # 使用模型进行前向传播,得到输出结果\n logits = self.forward(x_val)\n \n # 计算验证数据上的准确率\n acc_val = (logits.argmax(1) == y_val).sum().item() / x_val.size()[0]\n \n # 保存每一批验证数据的准确率\n avg_acc_val.append(acc_val)\n \n # 计算所有验证数据的平均准确率\n avg_acc_val = np.mean(avg_acc_val)\n\n # 如果当前周期的验证精度超过之前的最佳验证精度\n if avg_acc_val >= best_avg_acc:\n # 更新最佳验证精度\n best_avg_acc = avg_acc_val\n best_epoch = i\n \n # 检查模型保存路径是否存在,如果不存在,则创建\n if not path.exists(self.model_save_path):\n utils.mkdir(path.dirname(self.model_save_path))\n \n # 保存当前的模型参数\n torch.save(self.state_dict(), self.model_save_path)\n \n # 如果开启了详细输出模式,显示模型保存路径\n if verbose:\n print(f'模型保存在路径: {self.model_save_path}')\n\n # 如果开启了详细输出模式,显示训练损失、训练精度、验证精度和最佳验证精度\n if verbose:\n logger.info(f'训练损失(周期级别): {np.mean(losses):.4f} | 训练精度: {np.mean(accuracies) * 100:.2f}')\n logger.info(f'验证精度: {avg_acc_val * 100:.2f} | 最佳验证精度: {best_avg_acc * 100:.2f} 在第 {best_epoch} 个周期')\n\n def load(self):\n \"\"\"\n 从磁盘加载模型参数\n \"\"\"\n self.load_state_dict(torch.load(self.model_save_path))" }, { "identifier": "save_args", "path": "tools/utils.py", "snippet": "def save_args(fout, args):\n if isinstance(args, str):\n dump_txt(args, fout, mode='w')\n elif isinstance(args, dict):\n args_str = build_kwargs(args.keys(), args)\n dump_txt(args_str, fout, mode='w')\n else:\n raise TypeError(\"Expected str or dict.\")" }, { "identifier": "get_group_args", "path": "tools/utils.py", "snippet": "def get_group_args(args, args_parser, title):\n \"\"\"\n 从给定的 argparse.ArgumentParser 对象中获取指定组的参数值,并以字典形式返回。\n\n Args:\n - args (argparse.Namespace): 已解析的命令行参数对象。\n - args_parser (argparse.ArgumentParser): 命令行参数解析器对象。\n - title (str): 目标参数组的标题。\n\n Returns:\n - dict: 包含目标参数组中参数名及其对应的值的字典。\n \"\"\"\n import argparse\n\n # 确保传入的参数 args 和 args_parser 是 argparse.Namespace 和 argparse.ArgumentParser 类型\n assert isinstance(args, argparse.Namespace) and isinstance(args_parser, argparse.ArgumentParser)\n\n # 遍历 args_parser 中的所有参数组\n for group in args_parser._action_groups:\n # 如果找到了指定标题的参数组,则返回该组中指定参数名及其对应的值\n if 
group.title == title:\n return {action.dest: getattr(args, action.dest, None) for action in group._group_actions}\n else:\n # 否则继续查找下一个参数组\n continue\n\n # 如果未找到目标参数组,则返回空字典\n return {}" }, { "identifier": "to_tensor", "path": "tools/utils.py", "snippet": "def to_tensor(feature_x=None, labels=None, device='cpu'):\n \"\"\"Convert features, labels from array or sparse matrix to\n torch Tensor.\n code is adapted from: https://github.com/deeprobust/DeepRobust/graph/utils.py\n Parameters\n ----------\n adj : scipy.sparse.csr_matrix\n the adjacency matrix.\n features : scipy.sparse.csr_matrix\n node features\n labels : numpy.array\n node labels\n device : str\n 'cpu' or 'cuda'\n \"\"\"\n\n def _to_torch_tensor(mat):\n if sp.issparse(mat):\n mat = sparse_mx_to_torch_sparse_tensor(mat)\n elif isinstance(mat, torch.Tensor):\n pass\n else:\n mat = torch.DoubleTensor(mat)\n return mat\n\n feature_x = _to_torch_tensor(feature_x).to(device)\n if labels is None:\n return feature_x\n else:\n labels = torch.LongTensor(labels).to(device)\n return feature_x, labels" }, { "identifier": "dump_pickle", "path": "tools/utils.py", "snippet": "def dump_pickle(data, path, use_gzip=False):\n print(\"tr_te_va_split path:\", path)\n if not os.path.exists(os.path.dirname(path)):\n mkdir(os.path.dirname(path))\n if not use_gzip:\n with open(path, 'wb') as wr:\n pkl.dump(data, wr)\n else:\n with gzip.open(path, 'wb') as wr:\n pkl.dump(data, wr)\n return True" }, { "identifier": "read_pickle", "path": "tools/utils.py", "snippet": "def read_pickle(path, use_gzip=False):\n if os.path.isfile(path):\n if not use_gzip:\n with open(path, 'rb') as fr:\n return pkl.load(fr)\n else:\n with gzip.open(path, 'rb') as fr:\n return pkl.load(fr)\n else:\n raise IOError(\"The {0} is not been found.\".format(path))" } ]
import os.path as path import argparse import time import numpy from core.defense import Dataset from core.defense import MalwareDetectionDNN from tools.utils import save_args, get_group_args, to_tensor, dump_pickle, read_pickle
11,763
# 使用未来版本特性,确保代码在Python2和Python3中有一致的行为 from __future__ import absolute_import from __future__ import division from __future__ import print_function # 导入所需的库 # 导入自定义模块 # 初始化argparse对象,用于解析命令行参数 cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # 定义与特征提取相关的命令行参数 feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for features extraction.') # 特征提取的线程数量 feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # 表示每个应用的smali文件的最大数量 feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum number of vocabulary size') # 词汇的最大数量 feature_argparse.add_argument('--update', action='store_true', help='Whether update the existed features.') # 是否更新已存在的特征 # 定义与检测器相关的命令行参数 detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether use cuda enable gpu or cpu.') # 是否使用CUDA启用GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # 随机种子 detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # 密集隐藏单元列表 detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # dropout率 detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # leaky-relu或elu的斜率系数 detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # 在GAT层使用平滑激活函数elu detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # mini-batch大小 detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # 训练的epoch数 detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # 初始学习率 detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # 权重衰减系数 # 定义与数据集相关的命令行参数 dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cache data or not.') # 是否使用缓存数据 # 定义与模式相关的命令行参数 mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # 学习模型或测试模型的模式 mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # 测试模型名称的后缀日期 # 定义主函数 def _main(): args = cmd_md.parse_args() # 根据参数创建数据集 dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature')) # 获取训练数据集输入生成器 train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size, name='train', use_cache=args.cache) # 获取验证数据集输入生成器 val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size, name='val') # 获取测试数据集输入生成器 test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size, name='test') # 确保数据集的类别数为2 assert dataset.n_classes == 2 # 设置设备为CPU或CUDA if not args.cuda: dv = 'cpu' else: dv = 'cuda' # 设置模型名称 model_name = args.model_name if 
args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S") # 创建模型实例 model = MalwareDetectionDNN(dataset.vocab_size, dataset.n_classes, device=dv, name=model_name, **vars(args) ) # 将模型移至指定设备并转换为双精度浮点数 model = model.to(dv).double() # 如果模式为训练,则进行模型拟合 if args.mode == 'train': model.fit(train_dataset_producer, val_dataset_producer, epochs=args.epochs, lr=args.lr, weight_decay=args.weight_decay ) # 将参数以人类可读的方式保存
# 使用未来版本特性,确保代码在Python2和Python3中有一致的行为 from __future__ import absolute_import from __future__ import division from __future__ import print_function # 导入所需的库 # 导入自定义模块 # 初始化argparse对象,用于解析命令行参数 cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # 定义与特征提取相关的命令行参数 feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for features extraction.') # 特征提取的线程数量 feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # 表示每个应用的smali文件的最大数量 feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum number of vocabulary size') # 词汇的最大数量 feature_argparse.add_argument('--update', action='store_true', help='Whether update the existed features.') # 是否更新已存在的特征 # 定义与检测器相关的命令行参数 detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether use cuda enable gpu or cpu.') # 是否使用CUDA启用GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # 随机种子 detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # 密集隐藏单元列表 detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # dropout率 detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # leaky-relu或elu的斜率系数 detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # 在GAT层使用平滑激活函数elu detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # mini-batch大小 detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # 训练的epoch数 detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # 初始学习率 detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # 权重衰减系数 # 定义与数据集相关的命令行参数 dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cache data or not.') # 是否使用缓存数据 # 定义与模式相关的命令行参数 mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # 学习模型或测试模型的模式 mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # 测试模型名称的后缀日期 # 定义主函数 def _main(): args = cmd_md.parse_args() # 根据参数创建数据集 dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature')) # 获取训练数据集输入生成器 train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size, name='train', use_cache=args.cache) # 获取验证数据集输入生成器 val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size, name='val') # 获取测试数据集输入生成器 test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size, name='test') # 确保数据集的类别数为2 assert dataset.n_classes == 2 # 设置设备为CPU或CUDA if not args.cuda: dv = 'cpu' else: dv = 'cuda' # 设置模型名称 model_name = args.model_name if 
args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S") # 创建模型实例 model = MalwareDetectionDNN(dataset.vocab_size, dataset.n_classes, device=dv, name=model_name, **vars(args) ) # 将模型移至指定设备并转换为双精度浮点数 model = model.to(dv).double() # 如果模式为训练,则进行模型拟合 if args.mode == 'train': model.fit(train_dataset_producer, val_dataset_producer, epochs=args.epochs, lr=args.lr, weight_decay=args.weight_decay ) # 将参数以人类可读的方式保存
save_args(path.join(path.dirname(model.model_save_path), "hparam"), vars(args))
2
2023-11-27 02:00:23+00:00
16k
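Aside on the example above: the training script reaches its feature-extraction settings through get_group_args, which collapses one named argparse argument group into a plain keyword dict. What follows is a minimal, self-contained sketch of that pattern only; the toy parser, the option names and the group_to_dict helper are illustrative stand-ins, not code from the repository.

import argparse

# Declare hyper-parameters in named groups, as the example's cmd_md parser does.
parser = argparse.ArgumentParser(description='toy detector arguments')
feature_group = parser.add_argument_group(title='feature')
feature_group.add_argument('--proc_number', type=int, default=2)
feature_group.add_argument('--max_vocab_size', type=int, default=10000)
detector_group = parser.add_argument_group(title='detector')
detector_group.add_argument('--dropout', type=float, default=0.6)

def group_to_dict(args, arg_parser, title):
    # Walk the parser's argument groups; for the group whose title matches,
    # read each action's destination back out of the parsed namespace.
    for group in arg_parser._action_groups:
        if group.title == title:
            return {a.dest: getattr(args, a.dest, None) for a in group._group_actions}
    return {}

args = parser.parse_args(['--proc_number', '4'])
print(group_to_dict(args, parser, 'feature'))   # {'proc_number': 4, 'max_vocab_size': 10000}

Passing the resulting dict as feature_ext_args, as the row's _main() does, keeps group-specific options separate from the detector's own keyword arguments.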
Vali-98/XTTS-RVC-UI
rvc.py
[ { "identifier": "SynthesizerTrnMs256NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n 
segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers 
= n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = 
upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "VC", "path": "vc_infer_pipeline.py", "snippet": "class VC(object):\n def __init__(self, tgt_sr, config):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n\n # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)\n def get_optimal_torch_device(self, index: int = 0) -> torch.device:\n # Get cuda device\n if torch.cuda.is_available():\n return torch.device(\n f\"cuda:{index % torch.cuda.device_count()}\"\n ) # Very fast\n elif torch.backends.mps.is_available():\n return torch.device(\"mps\")\n # Insert an else here to grab \"xla\" devices if available. TO DO later. Requires the torch_xla.core.xla_model library\n # Else wise return the \"cpu\" as a torch device,\n return torch.device(\"cpu\")\n\n # Fork Feature: Compute f0 with the crepe method\n def get_f0_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n p_len,\n hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. 
Lower hop lengths means more pitch accuracy but longer inference time.\n model=\"full\", # Either use crepe-tiny \"tiny\" or crepe \"full\". Default is full\n ):\n x = x.astype(\n np.float32\n ) # fixes the F.conv2D exception. We needed to convert double to float.\n x /= np.quantile(np.abs(x), 0.999)\n torch_device = self.get_optimal_torch_device()\n audio = torch.from_numpy(x).to(torch_device, copy=True)\n audio = torch.unsqueeze(audio, dim=0)\n if audio.ndim == 2 and audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True).detach()\n audio = audio.detach()\n print(\"Initiating prediction with a crepe_hop_length of: \" + str(hop_length))\n pitch: Tensor = torchcrepe.predict(\n audio,\n self.sr,\n hop_length,\n f0_min,\n f0_max,\n model,\n batch_size=hop_length * 2,\n device=torch_device,\n pad=True,\n )\n p_len = p_len or x.shape[0] // hop_length\n # Resize the pitch for final f0\n source = np.array(pitch.squeeze(0).cpu().float().numpy())\n source[source < 0.001] = np.nan\n target = np.interp(\n np.arange(0, len(source) * p_len, len(source)) / p_len,\n np.arange(0, len(source)),\n source,\n )\n f0 = np.nan_to_num(target)\n return f0 # Resized f0\n\n def get_f0_official_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n model=\"full\",\n ):\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n return f0\n\n # Fork Feature: Compute pYIN f0 method\n def get_f0_pyin_computation(self, x, f0_min, f0_max):\n y, sr = librosa.load(\"saudio/Sidney.wav\", self.sr, mono=True)\n f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)\n f0 = f0[1:] # Get rid of extra first frame\n return f0\n\n # Fork Feature: Acquire median hybrid f0 estimation calculation\n def get_f0_hybrid_computation(\n self,\n methods_str,\n input_audio_path,\n x,\n f0_min,\n f0_max,\n p_len,\n filter_radius,\n crepe_hop_length,\n time_step,\n ):\n # Get various f0 methods from input to use in the computation stack\n s = methods_str\n s = s.split(\"hybrid\")[1]\n s = s.replace(\"[\", \"\").replace(\"]\", \"\")\n methods = s.split(\"+\")\n f0_computation_stack = []\n\n print(\"Calculating f0 pitch estimations for methods: %s\" % str(methods))\n x = x.astype(np.float32)\n x /= np.quantile(np.abs(x), 0.999)\n # Get f0 calculations for all methods specified\n for method in methods:\n f0 = None\n if method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)\n f0 = f0[1:] # Get rid of extra first frame\n elif method == \"crepe-tiny\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, \"tiny\")\n f0 = f0[1:] # Get rid of extra first frame\n elif method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length\n )\n elif method 
== \"mangio-crepe-tiny\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length, \"tiny\"\n )\n elif method == \"harvest\":\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n f0 = f0[1:] # Get rid of first frame.\n elif method == \"dio\": # Potentially buggy?\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n f0 = f0[1:]\n # elif method == \"pyin\": Not Working just yet\n # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max)\n # Push method to the stack\n f0_computation_stack.append(f0)\n\n for fc in f0_computation_stack:\n print(len(fc))\n\n print(\"Calculating hybrid median f0 from the stack of: %s\" % str(methods))\n f0_median_hybrid = None\n if len(f0_computation_stack) == 1:\n f0_median_hybrid = f0_computation_stack[0]\n else:\n f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)\n return f0_median_hybrid\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n crepe_hop_length,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"dio\": # Potentially Buggy?\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)\n elif f0_method == \"crepe-tiny\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, \"tiny\")\n elif f0_method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length\n )\n elif f0_method == \"mangio-crepe-tiny\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length, \"tiny\"\n )\n elif f0_method == \"rmvpe\":\n if hasattr(self, \"model_rmvpe\") == False:\n from rmvpe import RMVPE\n\n self.model_rmvpe = RMVPE(\n './models/rmvpe.pt', is_half=self.is_half, device=self.device\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n elif \"hybrid\" in f0_method:\n # Perform hybrid median pitch estimation\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = self.get_f0_hybrid_computation(\n f0_method,\n input_audio_path,\n x,\n f0_min,\n f0_max,\n p_len,\n filter_radius,\n crepe_hop_length,\n time_step,\n )\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = 
np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int_)\n\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch != None and pitchf != None:\n feats0 = feats.clone()\n if (\n isinstance(index, type(None)) == False\n and isinstance(big_npy, type(None)) == False\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch != None and pitchf != None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch != None and pitchf != None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch != None and pitchf != None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n if pitch != None and pitchf != None:\n audio1 = (\n (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])\n .data.cpu()\n .float()\n .numpy()\n )\n else:\n audio1 = (\n (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()\n )\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[0] += t1 - t0\n times[2] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n # 
file_big_npy,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n crepe_hop_length,\n f0_file=None,\n ):\n if (\n file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index) == True\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += audio_pad[i : i - self.window]\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n np.abs(audio_sum[t - self.t_query : t + self.t_query])\n == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\") == True:\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n crepe_hop_length,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if self.device == \"mps\":\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[1] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if resample_sr >= 16000 and tgt_sr != resample_sr:\n audio_opt = librosa.resample(\n 
audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
from multiprocessing import cpu_count from pathlib import Path from fairseq import checkpoint_utils from scipy.io import wavfile from infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from vc_infer_pipeline import VC import torch import librosa import numpy as np
11,095
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) elif version == "v2": if if_f0 == 1:
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) elif version == "v2": if if_f0 == 1:
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
2
2023-11-30 08:47:28+00:00
16k
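Aside on the example above: inside VC.get_f0 the estimated pitch contour is mapped onto the mel scale and compressed into 255 integer bins before being handed to the synthesizer. The snippet below is a minimal sketch of just that quantisation step; the standalone coarse_f0 helper and the sample frequencies are illustrative, not code from the repository.

import numpy as np

def coarse_f0(f0, f0_min=50.0, f0_max=1100.0):
    # Same bounds as VC.get_f0: 50 Hz .. 1100 Hz mapped onto the mel scale.
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)
    f0_mel = 1127 * np.log(1 + f0 / 700)
    # Voiced frames are rescaled into (1, 255]; unvoiced frames (f0 == 0) stay at bin 1.
    voiced = f0_mel > 0
    f0_mel[voiced] = (f0_mel[voiced] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > 255] = 255
    return np.rint(f0_mel).astype(np.int_)

f0_hz = np.array([0.0, 110.0, 220.0, 440.0])   # 0.0 marks an unvoiced frame
print(coarse_f0(f0_hz))                         # prints the integer pitch bins, e.g. [  1  23  60 122]

The continuous contour (f0bak in the snippet) is returned alongside the coarse bins, so the decoder still receives the raw pitch while the embedding lookup uses the quantised indices.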
ubc-vision/nf-soft-mining
examples/utils.py
[ { "identifier": "OccGridEstimator", "path": "nerfacc/estimators/occ_grid.py", "snippet": "class OccGridEstimator(AbstractEstimator):\n \"\"\"Occupancy grid transmittance estimator for spatial skipping.\n\n References: \"Instant Neural Graphics Primitives.\"\n\n Args:\n roi_aabb: The axis-aligned bounding box of the region of interest. Useful for mapping\n the 3D space to the grid.\n resolution: The resolution of the grid. If an integer is given, the grid is assumed to\n be a cube. Otherwise, a list or a tensor of shape (3,) is expected. Default: 128.\n levels: The number of levels of the grid. Default: 1.\n \"\"\"\n\n DIM: int = 3\n\n def __init__(\n self,\n roi_aabb: Union[List[int], Tensor],\n resolution: Union[int, List[int], Tensor] = 128,\n levels: int = 1,\n **kwargs,\n ) -> None:\n super().__init__()\n\n if \"contraction_type\" in kwargs:\n raise ValueError(\n \"`contraction_type` is not supported anymore for nerfacc >= 0.4.0.\"\n )\n\n # check the resolution is legal\n if isinstance(resolution, int):\n resolution = [resolution] * self.DIM\n if isinstance(resolution, (list, tuple)):\n resolution = torch.tensor(resolution, dtype=torch.int32)\n assert isinstance(resolution, Tensor), f\"Invalid type: {resolution}!\"\n assert resolution.shape[0] == self.DIM, f\"Invalid shape: {resolution}!\"\n\n # check the roi_aabb is legal\n if isinstance(roi_aabb, (list, tuple)):\n roi_aabb = torch.tensor(roi_aabb, dtype=torch.float32)\n assert isinstance(roi_aabb, Tensor), f\"Invalid type: {roi_aabb}!\"\n assert roi_aabb.shape[0] == self.DIM * 2, f\"Invalid shape: {roi_aabb}!\"\n\n # multiple levels of aabbs\n aabbs = torch.stack(\n [_enlarge_aabb(roi_aabb, 2**i) for i in range(levels)], dim=0\n )\n\n # total number of voxels\n self.cells_per_lvl = int(resolution.prod().item())\n self.levels = levels\n\n # Buffers\n self.register_buffer(\"resolution\", resolution) # [3]\n self.register_buffer(\"aabbs\", aabbs) # [n_aabbs, 6]\n self.register_buffer(\n \"occs\", torch.zeros(self.levels * self.cells_per_lvl)\n )\n self.register_buffer(\n \"binaries\",\n torch.zeros([levels] + resolution.tolist(), dtype=torch.bool),\n )\n\n # Grid coords & indices\n grid_coords = _meshgrid3d(resolution).reshape(\n self.cells_per_lvl, self.DIM\n )\n self.register_buffer(\"grid_coords\", grid_coords, persistent=False)\n grid_indices = torch.arange(self.cells_per_lvl)\n self.register_buffer(\"grid_indices\", grid_indices, persistent=False)\n\n @torch.no_grad()\n def sampling(\n self,\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # sigma/alpha function for skipping invisible space\n sigma_fn: Optional[Callable] = None,\n alpha_fn: Optional[Callable] = None,\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n t_min: Optional[Tensor] = None, # [n_rays]\n t_max: Optional[Tensor] = None, # [n_rays]\n # rendering options\n render_step_size: float = 1e-3,\n early_stop_eps: float = 1e-4,\n alpha_thre: float = 0.0,\n stratified: bool = False,\n cone_angle: float = 0.0,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Sampling with spatial skipping.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: Ray origins of shape (n_rays, 3).\n rays_d: Normalized ray directions of shape (n_rays, 3).\n sigma_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `sigma_fn`. 
It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation density values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n alpha_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `alpha_fn`. It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation opacity values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n near_plane: Optional. Near plane distance. Default: 0.0.\n far_plane: Optional. Far plane distance. Default: 1e10.\n t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).\n If profided, the marching will start from maximum of t_min and near_plane.\n t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).\n If profided, the marching will stop by minimum of t_max and far_plane.\n render_step_size: Step size for marching. Default: 1e-3.\n early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.\n alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.\n stratified: Whether to use stratified sampling. Default: False.\n cone_angle: Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n\n Returns:\n A tuple of {LongTensor, Tensor, Tensor}:\n\n - **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).\n - **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).\n - **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).\n\n Examples:\n\n .. code-block:: python\n\n >>> ray_indices, t_starts, t_ends = grid.sampling(\n >>> rays_o, rays_d, render_step_size=1e-3)\n >>> t_mid = (t_starts + t_ends) / 2.0\n >>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]\n\n \"\"\"\n\n near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)\n far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)\n\n if t_min is not None:\n near_planes = torch.clamp(near_planes, min=t_min)\n if t_max is not None:\n far_planes = torch.clamp(far_planes, max=t_max)\n\n if stratified:\n near_planes += torch.rand_like(near_planes) * render_step_size\n intervals, samples, _ = traverse_grids(\n rays_o,\n rays_d,\n self.binaries,\n self.aabbs,\n near_planes=near_planes,\n far_planes=far_planes,\n step_size=render_step_size,\n cone_angle=cone_angle,\n )\n t_starts = intervals.vals[intervals.is_left]\n t_ends = intervals.vals[intervals.is_right]\n ray_indices = samples.ray_indices\n packed_info = samples.packed_info\n\n # skip invisible space\n if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (\n sigma_fn is not None or alpha_fn is not None\n ):\n alpha_thre = min(alpha_thre, self.occs.mean().item())\n\n # Compute visibility of the samples, and filter out invisible samples\n if sigma_fn is not None:\n if t_starts.shape[0] != 0:\n sigmas = sigma_fn(t_starts, t_ends, ray_indices)\n else:\n sigmas = torch.empty((0,), device=t_starts.device)\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! 
Got {}\".format(sigmas.shape)\n masks = render_visibility_from_density(\n t_starts=t_starts,\n t_ends=t_ends,\n sigmas=sigmas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n elif alpha_fn is not None:\n if t_starts.shape[0] != 0:\n alphas = alpha_fn(t_starts, t_ends, ray_indices)\n else:\n alphas = torch.empty((0,), device=t_starts.device)\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n masks = render_visibility_from_alpha(\n alphas=alphas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n ray_indices, t_starts, t_ends = (\n ray_indices[masks],\n t_starts[masks],\n t_ends[masks],\n )\n return ray_indices, t_starts, t_ends\n\n @torch.no_grad()\n def update_every_n_steps(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 1e-2,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n n: int = 16,\n ) -> None:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n step: Current training step.\n occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and\n returns the occupancy values :math:`(N, 1)` at those locations.\n occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.\n ema_decay: The decay rate for EMA updates. Default: 0.95.\n warmup_steps: Sample all cells during the warmup stage. After the warmup\n stage we change the sampling strategy to 1/4 uniformly sampled cells\n together with 1/4 occupied cells. Default: 256.\n n: Update the grid every n steps. Default: 16.\n \"\"\"\n if not self.training:\n raise RuntimeError(\n \"You should only call this function only during training. \"\n \"Please call _update() directly if you want to update the \"\n \"field during inference.\"\n )\n if step % n == 0 and self.training:\n self._update(\n step=step,\n occ_eval_fn=occ_eval_fn,\n occ_thre=occ_thre,\n ema_decay=ema_decay,\n warmup_steps=warmup_steps,\n )\n\n # adapted from https://github.com/kwea123/ngp_pl/blob/master/models/networks.py\n @torch.no_grad()\n def mark_invisible_cells(\n self,\n K: Tensor,\n c2w: Tensor,\n width: int,\n height: int,\n near_plane: float = 0.0,\n chunk: int = 32**3,\n ) -> None:\n \"\"\"Mark the cells that aren't covered by the cameras with density -1.\n Should only be executed once before training starts.\n\n Args:\n K: Camera intrinsics of shape (N, 3, 3) or (1, 3, 3).\n c2w: Camera to world poses of shape (N, 3, 4) or (N, 4, 4).\n width: Image width in pixels\n height: Image height in pixels\n near_plane: Near plane distance\n chunk: The chunk size to split the cells (to avoid OOM)\n \"\"\"\n assert K.dim() == 3 and K.shape[1:] == (3, 3)\n assert c2w.dim() == 3 and (\n c2w.shape[1:] == (3, 4) or c2w.shape[1:] == (4, 4)\n )\n assert K.shape[0] == c2w.shape[0] or K.shape[0] == 1\n\n N_cams = c2w.shape[0]\n w2c_R = c2w[:, :3, :3].transpose(2, 1) # (N_cams, 3, 3)\n w2c_T = -w2c_R @ c2w[:, :3, 3:] # (N_cams, 3, 1)\n\n lvl_indices = self._get_all_cells()\n for lvl, indices in enumerate(lvl_indices):\n grid_coords = self.grid_coords[indices]\n\n for i in range(0, len(indices), chunk):\n x = grid_coords[i : i + chunk] / (self.resolution - 1)\n indices_chunk = indices[i : i + chunk]\n # voxel coordinates [0, 1]^3 -> world\n xyzs_w = (\n self.aabbs[lvl, :3]\n + x * (self.aabbs[lvl, 3:] - self.aabbs[lvl, :3])\n ).T\n xyzs_c = w2c_R @ xyzs_w + w2c_T # (N_cams, 3, chunk)\n uvd = K @ xyzs_c # (N_cams, 3, chunk)\n uv = uvd[:, :2] / uvd[:, 2:] # (N_cams, 2, chunk)\n 
in_image = (\n (uvd[:, 2] >= 0)\n & (uv[:, 0] >= 0)\n & (uv[:, 0] < width)\n & (uv[:, 1] >= 0)\n & (uv[:, 1] < height)\n )\n covered_by_cam = (\n uvd[:, 2] >= near_plane\n ) & in_image # (N_cams, chunk)\n # if the cell is visible by at least one camera\n count = covered_by_cam.sum(0) / N_cams\n\n too_near_to_cam = (\n uvd[:, 2] < near_plane\n ) & in_image # (N, chunk)\n # if the cell is too close (in front) to any camera\n too_near_to_any_cam = too_near_to_cam.any(0)\n # a valid cell should be visible by at least one camera and not too close to any camera\n valid_mask = (count > 0) & (~too_near_to_any_cam)\n\n cell_ids_base = lvl * self.cells_per_lvl\n self.occs[cell_ids_base + indices_chunk] = torch.where(\n valid_mask, 0.0, -1.0\n )\n\n @torch.no_grad()\n def _get_all_cells(self) -> List[Tensor]:\n \"\"\"Returns all cells of the grid.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + self.grid_indices\n indices = self.grid_indices[self.occs[cell_ids] >= 0.0]\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _sample_uniform_and_occupied_cells(self, n: int) -> List[Tensor]:\n \"\"\"Samples both n uniform and occupied cells.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n uniform_indices = torch.randint(\n self.cells_per_lvl, (n,), device=self.device\n )\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + uniform_indices\n uniform_indices = uniform_indices[self.occs[cell_ids] >= 0.0]\n occupied_indices = torch.nonzero(self.binaries[lvl].flatten())[:, 0]\n if n < len(occupied_indices):\n selector = torch.randint(\n len(occupied_indices), (n,), device=self.device\n )\n occupied_indices = occupied_indices[selector]\n indices = torch.cat([uniform_indices, occupied_indices], dim=0)\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _update(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 0.01,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n ) -> None:\n \"\"\"Update the occ field in the EMA way.\"\"\"\n # sample cells\n if step < warmup_steps:\n lvl_indices = self._get_all_cells()\n else:\n N = self.cells_per_lvl // 4\n lvl_indices = self._sample_uniform_and_occupied_cells(N)\n\n for lvl, indices in enumerate(lvl_indices):\n # infer occupancy: density * step_size\n grid_coords = self.grid_coords[indices]\n x = (\n grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)\n ) / self.resolution\n # voxel coordinates [0, 1]^3 -> world\n x = self.aabbs[lvl, :3] + x * (\n self.aabbs[lvl, 3:] - self.aabbs[lvl, :3]\n )\n occ = occ_eval_fn(x).squeeze(-1)\n # ema update\n cell_ids = lvl * self.cells_per_lvl + indices\n self.occs[cell_ids] = torch.maximum(\n self.occs[cell_ids] * ema_decay, occ\n )\n # suppose to use scatter max but emperically it is almost the same.\n # self.occs, _ = scatter_max(\n # occ, indices, dim=0, out=self.occs * ema_decay\n # )\n thre = torch.clamp(self.occs[self.occs >= 0].mean(), max=occ_thre)\n self.binaries = (self.occs > thre).view(self.binaries.shape)" }, { "identifier": "PropNetEstimator", "path": "nerfacc/estimators/prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n \"\"\"Proposal network transmittance estimator.\n\n References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n Args:\n optimizer: The optimizer to use for the proposal networks.\n scheduler: The 
learning rate scheduler to use for the proposal networks.\n \"\"\"\n\n def __init__(\n self,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n ) -> None:\n super().__init__()\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.prop_cache: List = []\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\"uniform\", \"lindisp\"] = \"lindisp\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Note:\n When `requires_grad` is `True`, the gradients are allowed to flow\n through the proposal networks, and the outputs of the proposal\n networks are cached to update them later when calling `update_every_n_steps()`\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for level_fn, level_samples in zip(prop_sigma_fns, prop_samples):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(\n t_starts, t_ends, sigmas\n )\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[:, :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs))\n\n intervals, _ = importance_sampling(\n intervals, cdfs, num_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. 
Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[:, :1])], dim=-1)\n cdfs = cdfs.detach()\n\n loss = 0.0\n while self.prop_cache:\n prop_intervals, prop_cdfs = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" }, { "identifier": "ray_aabb_intersect", "path": "nerfacc/grid.py", "snippet": "@torch.no_grad()\ndef ray_aabb_intersect(\n rays_o: Tensor,\n rays_d: Tensor,\n aabbs: Tensor,\n near_plane: float = -float(\"inf\"),\n far_plane: float = float(\"inf\"),\n miss_value: float = float(\"inf\"),\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Ray-AABB intersection.\n\n Args:\n rays_o: (n_rays, 3) Ray origins.\n rays_d: (n_rays, 3) Normalized ray directions.\n aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.\n near_plane: Optional. Near plane. Default to -infinity.\n far_plane: Optional. Far plane. Default to infinity.\n miss_value: Optional. 
Value to use for tmin and tmax when there is no intersection.\n Default to infinity.\n\n Returns:\n A tuple of {Tensor, Tensor, BoolTensor}:\n\n - **t_mins**: (n_rays, m) tmin for each ray-AABB pair.\n - **t_maxs**: (n_rays, m) tmax for each ray-AABB pair.\n - **hits**: (n_rays, m) whether each ray-AABB pair intersects.\n \"\"\"\n assert rays_o.ndim == 2 and rays_o.shape[-1] == 3\n assert rays_d.ndim == 2 and rays_d.shape[-1] == 3\n assert aabbs.ndim == 2 and aabbs.shape[-1] == 6\n t_mins, t_maxs, hits = _C.ray_aabb_intersect(\n rays_o.contiguous(),\n rays_d.contiguous(),\n aabbs.contiguous(),\n near_plane,\n far_plane,\n miss_value,\n )\n return t_mins, t_maxs, hits" }, { "identifier": "traverse_grids", "path": "nerfacc/grid.py", "snippet": "@torch.no_grad()\ndef traverse_grids(\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # grids\n binaries: Tensor, # [m, resx, resy, resz]\n aabbs: Tensor, # [m, 6]\n # options\n near_planes: Optional[Tensor] = None, # [n_rays]\n far_planes: Optional[Tensor] = None, # [n_rays]\n step_size: Optional[float] = 1e-3,\n cone_angle: Optional[float] = 0.0,\n traverse_steps_limit: Optional[int] = None,\n over_allocate: Optional[bool] = False,\n rays_mask: Optional[Tensor] = None, # [n_rays]\n # pre-compute intersections\n t_sorted: Optional[Tensor] = None, # [n_rays, n_grids * 2]\n t_indices: Optional[Tensor] = None, # [n_rays, n_grids * 2]\n hits: Optional[Tensor] = None, # [n_rays, n_grids]\n) -> Tuple[RayIntervals, RaySamples, Tensor]:\n \"\"\"Ray Traversal within Multiple Grids.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: (n_rays, 3) Ray origins.\n rays_d: (n_rays, 3) Normalized ray directions.\n binary_grids: (m, resx, resy, resz) Multiple binary grids with the same resolution.\n aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.\n near_planes: Optional. (n_rays,) Near planes for the traversal to start. Default to 0.\n far_planes: Optional. (n_rays,) Far planes for the traversal to end. Default to infinity.\n step_size: Optional. Step size for ray traversal. Default to 1e-3.\n cone_angle: Optional. Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n traverse_steps_limit: Optional. Maximum number of samples per ray.\n over_allocate: Optional. Whether to over-allocate the memory for the outputs.\n rays_mask: Optional. (n_rays,) Skip some rays if given.\n t_sorted: Optional. (n_rays, n_grids * 2) Pre-computed sorted t values for each ray-grid pair. Default to None.\n t_indices: Optional. (n_rays, n_grids * 2) Pre-computed sorted t indices for each ray-grid pair. Default to None.\n hits: Optional. (n_rays, n_grids) Pre-computed hit flags for each ray-grid pair. 
Default to None.\n\n Returns:\n A :class:`RayIntervals` object containing the intervals of the ray traversal, and\n a :class:`RaySamples` object containing the samples within each interval.\n t :class:`Tensor` of shape (n_rays,) containing the terminated t values for each ray.\n \"\"\"\n\n if near_planes is None:\n near_planes = torch.zeros_like(rays_o[:, 0])\n if far_planes is None:\n far_planes = torch.full_like(rays_o[:, 0], float(\"inf\"))\n\n if rays_mask is None:\n rays_mask = torch.ones_like(rays_o[:, 0], dtype=torch.bool)\n if traverse_steps_limit is None:\n traverse_steps_limit = -1\n if over_allocate:\n assert (\n traverse_steps_limit > 0\n ), \"traverse_steps_limit must be set if over_allocate is True.\"\n\n if t_sorted is None or t_indices is None or hits is None:\n # Compute ray aabb intersection for all levels of grid. [n_rays, m]\n t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, aabbs)\n # Sort the t values for each ray. [n_rays, m]\n t_sorted, t_indices = torch.sort(\n torch.cat([t_mins, t_maxs], dim=-1), dim=-1\n )\n\n # Traverse the grids.\n intervals, samples, termination_planes = _C.traverse_grids(\n # rays\n rays_o.contiguous(), # [n_rays, 3]\n rays_d.contiguous(), # [n_rays, 3]\n rays_mask.contiguous(), # [n_rays]\n # grids\n binaries.contiguous(), # [m, resx, resy, resz]\n aabbs.contiguous(), # [m, 6]\n # intersections\n t_sorted.contiguous(), # [n_rays, m * 2]\n t_indices.contiguous(), # [n_rays, m * 2]\n hits.contiguous(), # [n_rays, m]\n # options\n near_planes.contiguous(), # [n_rays]\n far_planes.contiguous(), # [n_rays]\n step_size,\n cone_angle,\n True,\n True,\n True,\n traverse_steps_limit,\n over_allocate,\n )\n return (\n RayIntervals._from_cpp(intervals),\n RaySamples._from_cpp(samples),\n termination_planes,\n )" }, { "identifier": "accumulate_along_rays_", "path": "nerfacc/volrend.py", "snippet": "def accumulate_along_rays_(\n weights: Tensor,\n values: Optional[Tensor] = None,\n ray_indices: Optional[Tensor] = None,\n outputs: Optional[Tensor] = None,\n) -> None:\n \"\"\"Accumulate volumetric values along the ray.\n\n Inplace version of :func:`accumulate_along_rays`.\n \"\"\"\n if values is None:\n src = weights[..., None]\n else:\n assert values.dim() == weights.dim() + 1\n assert weights.shape == values.shape[:-1]\n src = weights[..., None] * values\n if ray_indices is not None:\n assert weights.dim() == 1, \"weights must be flattened\"\n assert (\n outputs.dim() == 2 and outputs.shape[-1] == src.shape[-1]\n ), \"outputs must be of shape (n_rays, D)\"\n outputs.index_add_(0, ray_indices, src)\n else:\n outputs.add_(src.sum(dim=-2))" }, { "identifier": "render_weight_from_density", "path": "nerfacc/volrend.py", "snippet": "def render_weight_from_density(\n t_starts: Tensor,\n t_ends: Tensor,\n sigmas: Tensor,\n packed_info: Optional[Tensor] = None,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n prefix_trans: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Compute rendering weights :math:`w_i` from density :math:`\\\\sigma_i` and interval :math:`\\\\delta_i`.\n\n .. math::\n w_i = T_i(1 - exp(-\\\\sigma_i\\delta_i)), \\\\quad\\\\textrm{where}\\\\quad T_i = exp(-\\\\sum_{j=1}^{i-1}\\\\sigma_j\\delta_j)\n\n This function supports both batched and flattened input tensor. For flattened input tensor, either\n (`packed_info`) or (`ray_indices` and `n_rays`) should be provided.\n\n Args:\n t_starts: The start time of the samples. 
Tensor with shape (all_samples,) or (n_rays, n_samples).\n t_ends: The end time of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).\n sigmas: The density values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).\n packed_info: A tensor of shape (n_rays, 2) that specifies the start and count\n of each chunk in the flattened samples, with in total n_rays chunks.\n Useful for flattened input.\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).\n\n Returns:\n The rendering weights, transmittance and opacities, both with the same shape as `sigmas`.\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device=\"cuda\")\n >>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device=\"cuda\")\n >>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device=\"cuda\")\n >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device=\"cuda\")\n >>> weights, transmittance, alphas = render_weight_from_density(\n >>> t_starts, t_ends, sigmas, ray_indices=ray_indices)\n weights: [0.33, 0.37, 0.03, 0.55, 0.04, 0.00, 0.59]\n transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00]\n alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59]\n\n \"\"\"\n trans, alphas = render_transmittance_from_density(\n t_starts, t_ends, sigmas, packed_info, ray_indices, n_rays, prefix_trans\n )\n weights = trans * alphas\n return weights, trans, alphas" }, { "identifier": "rendering", "path": "nerfacc/volrend.py", "snippet": "def rendering(\n # ray marching results\n t_starts: Tensor,\n t_ends: Tensor,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n # radiance field\n rgb_sigma_fn: Optional[Callable] = None,\n rgb_alpha_fn: Optional[Callable] = None,\n # rendering options\n render_bkgd: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor, Dict]:\n \"\"\"Render the rays through the radience field defined by `rgb_sigma_fn`.\n\n This function is differentiable to the outputs of `rgb_sigma_fn` so it can\n be used for gradient-based optimization. It supports both batched and flattened input tensor.\n For flattened input tensor, both `ray_indices` and `n_rays` should be provided.\n\n\n Note:\n Either `rgb_sigma_fn` or `rgb_alpha_fn` should be provided.\n\n Warning:\n This function is not differentiable to `t_starts`, `t_ends` and `ray_indices`.\n\n Args:\n t_starts: Per-sample start distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n t_ends: Per-sample end distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n rgb_sigma_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and density\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n rgb_alpha_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and opacity\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n render_bkgd: Background color. 
Tensor with shape (3,).\n\n Returns:\n Ray colors (n_rays, 3), opacities (n_rays, 1), depths (n_rays, 1) and a dict\n containing extra intermediate results (e.g., \"weights\", \"trans\", \"alphas\")\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.3], device=\"cuda:0\")\n >>> t_ends = torch.tensor([0.2, 0.3, 0.2, 0.3, 0.4], device=\"cuda:0\")\n >>> ray_indices = torch.tensor([0, 0, 1, 1, 1], device=\"cuda:0\")\n >>> def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n >>> # This is a dummy function that returns random values.\n >>> rgbs = torch.rand((t_starts.shape[0], 3), device=\"cuda:0\")\n >>> sigmas = torch.rand((t_starts.shape[0],), device=\"cuda:0\")\n >>> return rgbs, sigmas\n >>> colors, opacities, depths, extras = rendering(\n >>> t_starts, t_ends, ray_indices, n_rays=2, rgb_sigma_fn=rgb_sigma_fn)\n >>> print(colors.shape, opacities.shape, depths.shape)\n torch.Size([2, 3]) torch.Size([2, 1]) torch.Size([2, 1])\n >>> extras.keys()\n dict_keys(['weights', 'alphas', 'trans'])\n\n \"\"\"\n if ray_indices is not None:\n assert (\n t_starts.shape == t_ends.shape == ray_indices.shape\n ), \"Since nerfacc 0.5.0, t_starts, t_ends and ray_indices must have the same shape (N,). \"\n\n if rgb_sigma_fn is None and rgb_alpha_fn is None:\n raise ValueError(\n \"At least one of `rgb_sigma_fn` and `rgb_alpha_fn` should be specified.\"\n )\n\n # Query sigma/alpha and color with gradients\n if rgb_sigma_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n sigmas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! Got {}\".format(sigmas.shape)\n # Rendering: compute weights.\n weights, trans, alphas = render_weight_from_density(\n t_starts,\n t_ends,\n sigmas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"alphas\": alphas,\n \"trans\": trans,\n \"sigmas\": sigmas,\n \"rgbs\": rgbs,\n }\n elif rgb_alpha_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, alphas = rgb_alpha_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n alphas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n # Rendering: compute weights.\n weights, trans = render_weight_from_alpha(\n alphas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"trans\": trans,\n \"rgbs\": rgbs,\n \"alphas\": alphas,\n }\n\n # Rendering: accumulate rgbs, opacities, and depths along the rays.\n colors = accumulate_along_rays(\n weights, values=rgbs, ray_indices=ray_indices, n_rays=n_rays\n )\n opacities = accumulate_along_rays(\n weights, values=None, ray_indices=ray_indices, n_rays=n_rays\n )\n depths = accumulate_along_rays(\n weights,\n values=(t_starts + t_ends)[..., None] / 2.0,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n depths = depths / opacities.clamp_min(torch.finfo(rgbs.dtype).eps)\n\n # Background composition.\n if render_bkgd is not None:\n colors = colors + render_bkgd * (1.0 - opacities)\n\n return colors, opacities, depths, extras" } ]
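The last two volrend helpers quoted above are easiest to see end to end on toy data. Below is a minimal sketch (not part of the record) that reuses the tensors from the render_weight_from_density docstring example and then composites per-sample colors into per-ray colors with accumulate_along_rays_; it assumes a GPU build of nerfacc, as in the docstring examples, and the random rgbs only stand in for a real radiance field.

import torch
from nerfacc.volrend import accumulate_along_rays_, render_weight_from_density

# Seven flattened samples spread over three rays (same toy values as the
# docstring example above).
t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda")
t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device="cuda")
sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda")
ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda")

# Per-sample rendering weights from densities and interval lengths.
weights, trans, alphas = render_weight_from_density(
    t_starts, t_ends, sigmas, ray_indices=ray_indices
)

# Composite placeholder per-sample colors into per-ray colors, in place.
rgbs = torch.rand((t_starts.shape[0], 3), device="cuda")
colors = torch.zeros((3, 3), device="cuda")  # (n_rays, 3)
accumulate_along_rays_(weights, values=rgbs, ray_indices=ray_indices, outputs=colors)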
import random import numpy as np import torch from typing import Optional, Sequence from typing import Literal from typing_extensions import Literal from datasets.utils import Rays, namedtuple_map from torch.utils.data._utils.collate import collate, default_collate_fn_map from nerfacc.estimators.occ_grid import OccGridEstimator from nerfacc.estimators.prop_net import PropNetEstimator from nerfacc.grid import ray_aabb_intersect, traverse_grids from nerfacc.volrend import ( accumulate_along_rays_, render_weight_from_density, rendering, )
11678
NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, # test options test_chunk_size: int = 8192, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) sigmas = radiance_field.query_density(positions, t) else: sigmas = radiance_field.query_density(positions) return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) ray_indices, t_starts, t_ends = estimator.sampling( chunk_rays.origins, chunk_rays.viewdirs, sigma_fn=sigma_fn, near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, stratified=radiance_field.training, cone_angle=cone_angle, alpha_thre=alpha_thre, )
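The cropped code stops at the estimator.sampling call inside the chunk loop. A self-contained sketch of that call on a freshly built occupancy grid follows; the unit-cube roi_aabb, the resolution, and the constant toy density are illustrative choices rather than values from the record, and a CUDA device is assumed because grid traversal runs on the GPU.

import torch
from nerfacc.estimators.occ_grid import OccGridEstimator

device = "cuda"
# Hypothetical scene bounds: a unit cube, single grid level.
estimator = OccGridEstimator(
    roi_aabb=[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], resolution=64, levels=1
).to(device)

# A handful of rays leaving the origin in random directions.
rays_o = torch.zeros(4, 3, device=device)
rays_d = torch.nn.functional.normalize(torch.randn(4, 3, device=device), dim=-1)

def sigma_fn(t_starts, t_ends, ray_indices):
    # Constant toy density; the real sigma_fn above instead queries the
    # radiance field at the interval midpoints.
    return torch.full_like(t_starts, 0.5)

ray_indices, t_starts, t_ends = estimator.sampling(
    rays_o, rays_d, sigma_fn=sigma_fn, render_step_size=1e-2
)
# A freshly initialized grid is empty, so this returns zero samples; during
# training the grid is populated via update_every_n_steps before sampling.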
""" Copyright (c) 2022 Ruilong Li, UC Berkeley. """ try: except ImportError: NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, # test options test_chunk_size: int = 8192, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) sigmas = radiance_field.query_density(positions, t) else: sigmas = radiance_field.query_density(positions) return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) ray_indices, t_starts, t_ends = estimator.sampling( chunk_rays.origins, chunk_rays.viewdirs, sigma_fn=sigma_fn, near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, stratified=radiance_field.training, cone_angle=cone_angle, alpha_thre=alpha_thre, )
rgb, opacity, depth, extras = rendering(
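The next_line above opens a call into the rendering helper quoted in the context. One plausible completion of that call, reconstructed from the rendering signature rather than copied from the source file, is sketched here; chunk_rays, rgb_sigma_fn and render_bkgd are the loop-local names defined in the function above.

# Hypothetical continuation of the chunk loop (not copied from the record):
rgb, opacity, depth, extras = rendering(
    t_starts,
    t_ends,
    ray_indices=ray_indices,
    n_rays=chunk_rays.origins.shape[0],
    rgb_sigma_fn=rgb_sigma_fn,
    render_bkgd=render_bkgd,
)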
6
2023-11-27 22:12:55+00:00
16k
facebookresearch/SOC-matching
main.py
[ { "identifier": "get_folder_name", "path": "SOC_matching/utils.py", "snippet": "def get_folder_name(cfg):\n folder_name = (\n cfg.method.algorithm\n + \"_\"\n + cfg.method.setting\n + \"_\"\n + str(cfg.method.lmbd)\n + \"_\"\n + str(cfg.method.T)\n + \"_\"\n + str(cfg.method.num_steps)\n + \"_\"\n + str(cfg.method.use_warm_start)\n + \"_\"\n + str(cfg.method.seed)\n + \"_\"\n + str(cfg.optim.batch_size)\n + \"_\"\n + str(cfg.optim.M_lr)\n + \"_\"\n + str(cfg.optim.nabla_V_lr)\n )\n return folder_name" }, { "identifier": "get_file_name", "path": "SOC_matching/utils.py", "snippet": "def get_file_name(folder_name, num_iterations=0, last=False):\n if last:\n return folder_name + \"/last.pkl\"\n file_name = str(num_iterations)\n print(f\"folder_name: {folder_name}\")\n return folder_name + \"/\" + file_name + \".pkl\"" }, { "identifier": "control_objective", "path": "SOC_matching/utils.py", "snippet": "def control_objective(\n sde, x0, ts, lmbd, batch_size, total_n_samples=65536, verbose=False\n):\n n_batches = int(total_n_samples // batch_size)\n effective_n_samples = n_batches * batch_size\n for k in range(n_batches):\n state0 = x0.repeat(batch_size, 1)\n (\n _,\n _,\n _,\n _,\n log_path_weight_deterministic,\n _,\n log_terminal_weight,\n _,\n ) = stochastic_trajectories(\n sde,\n state0,\n ts.to(state0),\n lmbd,\n verbose=verbose,\n )\n if k == 0:\n ctrl_losses = -lmbd * (log_path_weight_deterministic + log_terminal_weight)\n else:\n ctrl_loss = -lmbd * (log_path_weight_deterministic + log_terminal_weight)\n ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)\n if k % 32 == 31:\n print(f\"Batch {k+1}/{n_batches} done\")\n return torch.mean(ctrl_losses), torch.std(ctrl_losses) / np.sqrt(\n effective_n_samples - 1\n )" }, { "identifier": "save_results", "path": "SOC_matching/utils.py", "snippet": "def save_results(results, folder_name, file_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n with open(file_name, \"wb\") as f:\n pickle.dump(results, f)" }, { "identifier": "compute_EMA", "path": "SOC_matching/utils.py", "snippet": "def compute_EMA(value, EMA_value, EMA_coeff=0.01, itr=0):\n itr_avg = int(np.floor(1 / EMA_coeff))\n if itr == 0:\n return value\n elif itr <= itr_avg:\n return (value + itr * EMA_value) / (itr + 1)\n else:\n return EMA_coeff * value + (1 - EMA_coeff) * EMA_value" }, { "identifier": "normalization_constant", "path": "SOC_matching/utils.py", "snippet": "def normalization_constant(\n sde, x0, ts, cfg, n_batches_normalization=512, ground_truth_control=None\n):\n log_weights_list = []\n weights_list = []\n\n if ground_truth_control is not None:\n norm_sqd_diff_mean = 0\n for k in range(n_batches_normalization):\n (\n states,\n _,\n _,\n _,\n log_path_weight_deterministic,\n log_path_weight_stochastic,\n log_terminal_weight,\n controls,\n ) = stochastic_trajectories(\n sde,\n x0,\n ts.to(x0),\n cfg.method.lmbd,\n )\n log_weights = (\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n log_weights_list.append(log_weights)\n weights = torch.exp(\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n weights_list.append(weights)\n\n if ground_truth_control is not None:\n gt_controls = ground_truth_control(ts, states, t_is_tensor=True)[\n :-1, :, :\n ].detach()\n norm_sqd_diff = torch.sum(\n (gt_controls - controls) ** 2\n * weights.unsqueeze(0).unsqueeze(2)\n / (gt_controls.shape[0] * gt_controls.shape[1])\n )\n norm_sqd_diff_mean += norm_sqd_diff\n if k % 32 == 31:\n 
print(f\"Batch {k+1}/{n_batches_normalization} done\")\n if ground_truth_control is not None:\n norm_sqd_diff_mean = norm_sqd_diff_mean / n_batches_normalization\n else:\n norm_sqd_diff_mean = None\n\n log_weights = torch.stack(log_weights_list, dim=1)\n weights = torch.stack(weights_list, dim=1)\n\n print(\n f\"Average and std. dev. of log_weights for all batches: {torch.mean(log_weights)} {torch.std(log_weights)}\"\n )\n\n normalization_const = torch.mean(weights)\n normalization_const_std_error = torch.std(weights) / np.sqrt(\n weights.shape[0] * weights.shape[1] - 1\n )\n return normalization_const, normalization_const_std_error, norm_sqd_diff_mean" }, { "identifier": "SOC_Solver", "path": "SOC_matching/method.py", "snippet": "class SOC_Solver(nn.Module):\n noise_type = \"diagonal\"\n sde_type = \"ito\"\n\n def __init__(\n self,\n neural_sde,\n x0,\n ut,\n T=1.0,\n num_steps=100,\n lmbd=1.0,\n d=2,\n sigma=torch.eye(2),\n ):\n super().__init__()\n self.dim = neural_sde.dim\n self.neural_sde = neural_sde\n self.x0 = x0\n self.ut = ut\n self.T = T\n self.ts = torch.linspace(0, T, num_steps + 1).to(x0.device)\n self.num_steps = num_steps\n self.dt = T / num_steps\n self.lmbd = lmbd\n self.d = d\n self.y0 = torch.nn.Parameter(torch.randn(1, device=x0.device))\n self.sigma = sigma\n\n def control(self, t0, x0):\n x0 = x0.reshape(-1, self.dim)\n t0_expanded = t0.reshape(-1, 1).expand(x0.shape[0], 1)\n tx = torch.cat([t0_expanded, x0], dim=-1)\n nabla_V = self.neural_sde.nabla_V(tx)\n learned_control = -torch.einsum(\n \"ij,bj->bi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n return learned_control\n\n def control_objective(self, batch_size, total_n_samples=65536):\n n_batches = int(total_n_samples // batch_size)\n effective_n_samples = n_batches * batch_size\n for k in range(n_batches):\n state0 = self.x0.repeat(batch_size, 1)\n (\n states,\n _,\n _,\n _,\n log_path_weight_deterministic,\n _,\n log_terminal_weight,\n _,\n ) = utils.stochastic_trajectories(\n self.neural_sde,\n state0,\n self.ts.to(state0),\n self.lmbd,\n )\n if k == 0:\n ctrl_losses = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n trajectory = states\n else:\n ctrl_loss = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)\n if k % 32 == 31:\n print(f\"Batch {k+1}/{n_batches} done\")\n return (\n torch.mean(ctrl_losses),\n torch.std(ctrl_losses) / np.sqrt(effective_n_samples - 1),\n trajectory,\n )\n\n def loss(\n self,\n batch_size,\n compute_L2_error=False,\n optimal_control=None,\n compute_control_objective=False,\n algorithm=\"SOCM_const_M\",\n add_weights=False,\n total_n_samples=65536,\n verbose=False,\n u_warm_start=None,\n use_warm_start=True,\n use_stopping_time=False,\n ):\n\n state0 = self.x0.repeat(batch_size, 1)\n d = state0.shape[1]\n detach = algorithm != \"rel_entropy\"\n (\n states,\n noises,\n stop_indicators,\n fractional_timesteps,\n log_path_weight_deterministic,\n log_path_weight_stochastic,\n log_terminal_weight,\n controls,\n ) = utils.stochastic_trajectories(\n self.neural_sde,\n state0,\n self.ts.to(state0),\n self.lmbd,\n detach=detach,\n )\n unsqueezed_stop_indicators = stop_indicators.unsqueeze(2)\n weight = torch.exp(\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n\n if algorithm == \"rel_entropy\":\n ctrl_losses = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n objective = torch.mean(ctrl_losses)\n weight = 
weight.detach()\n learned_control = controls.detach()\n else:\n ts_repeat = self.ts.unsqueeze(1).unsqueeze(2).repeat(1, states.shape[1], 1)\n tx = torch.cat([ts_repeat, states], dim=-1)\n tx_reshape = torch.reshape(tx, (-1, tx.shape[2]))\n\n # Evaluate nabla_V\n nabla_V = self.neural_sde.nabla_V(tx_reshape)\n nabla_V = torch.reshape(nabla_V, states.shape)\n\n if u_warm_start and use_warm_start:\n sigma_inverse_transpose = torch.transpose(\n torch.inverse(self.sigma), 0, 1\n )\n u_warm_start_eval = u_warm_start(self.ts, states).detach()\n nabla_V = nabla_V - torch.einsum(\n \"ij,abj->abi\", sigma_inverse_transpose, u_warm_start_eval\n )\n\n if algorithm == \"SOCM_const_M\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n least_squares_target_integrand_term_1 = (\n self.neural_sde.nabla_f(self.ts[0], states)\n )[:-1, :, :]\n least_squares_target_integrand_term_2 = -np.sqrt(self.lmbd) * torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n least_squares_target_integrand_term_3 = -torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n least_squares_target_terminal = self.neural_sde.nabla_g(states[-1, :, :])\n\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_1[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_2[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_3_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_3[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_3\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n\n cumulative_sum_least_squares_term_1 = torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n )\n cumulative_sum_least_squares_term_2 = torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n )\n cumulative_sum_least_squares_term_3 = torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n )\n least_squares_target = (\n cumulative_sum_least_squares_term_1\n + cumulative_sum_least_squares_term_2\n + cumulative_sum_least_squares_term_3\n + least_squares_target_terminal.unsqueeze(0)\n )\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), least_squares_target\n )\n\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM_exp\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n exp_factor = torch.exp(-self.gamma * self.ts)\n identity = torch.eye(d).to(self.x0.device)\n 
least_squares_target_integrand_term_1 = (\n exp_factor.unsqueeze(1).unsqueeze(2)\n * self.neural_sde.nabla_f(self.ts[0], states)\n )[:-1, :, :]\n least_squares_target_integrand_term_2 = exp_factor[:-1].unsqueeze(\n 1\n ).unsqueeze(2) * (\n -np.sqrt(self.lmbd)\n * torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :]\n + self.gamma * identity,\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n )\n least_squares_target_integrand_term_3 = exp_factor[:-1].unsqueeze(\n 1\n ).unsqueeze(2) * (\n -torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :]\n + self.gamma * identity,\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n )\n least_squares_target_terminal = torch.exp(\n -self.gamma * (self.T - self.ts)\n ).unsqueeze(1).unsqueeze(2) * self.neural_sde.nabla_g(\n states[-1, :, :]\n ).unsqueeze(\n 0\n )\n\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_1[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_2[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_3_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_3[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_3\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n\n inv_exp_factor = 1 / exp_factor\n cumsum_least_squares_term_1 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(least_squares_target_integrand_term_1_times_dt, dim=0)\n )\n cumsum_least_squares_term_2 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n )\n )\n cumsum_least_squares_term_3 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(least_squares_target_integrand_term_3_times_dt, dim=0)\n )\n\n least_squares_target = (\n cumsum_least_squares_term_1\n + cumsum_least_squares_term_2\n + cumsum_least_squares_term_3\n + least_squares_target_terminal\n )\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), least_squares_target\n )\n\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n identity = torch.eye(d).to(self.x0.device)\n\n if use_stopping_time:\n sum_M = lambda t, s, stopping_timestep_values: self.neural_sde.M(\n t, s, stopping_timestep_values\n ).sum(dim=0)\n\n derivative_M_0 = functorch.jacrev(sum_M, argnums=1)\n derivative_M = lambda t, s, stopping_timestep_values: torch.transpose(\n torch.transpose(\n torch.transpose(\n derivative_M_0(t, s, stopping_timestep_values), 2, 3\n ),\n 1,\n 2,\n ),\n 0,\n 
1,\n )\n\n M_evals = torch.zeros(len(self.ts), len(self.ts), batch_size, d, d).to(\n self.ts.device\n )\n derivative_M_evals = torch.zeros(\n len(self.ts), len(self.ts), batch_size, d, d\n ).to(self.ts.device)\n\n else:\n sum_M = lambda t, s: self.neural_sde.M(t, s).sum(dim=0)\n\n derivative_M_0 = functorch.jacrev(sum_M, argnums=1)\n derivative_M = lambda t, s: torch.transpose(\n torch.transpose(derivative_M_0(t, s), 1, 2), 0, 1\n )\n\n M_evals = torch.zeros(len(self.ts), len(self.ts), d, d).to(\n self.ts.device\n )\n derivative_M_evals = torch.zeros(len(self.ts), len(self.ts), d, d).to(\n self.ts.device\n )\n\n if use_stopping_time:\n stopping_function_output_int = (self.neural_sde.Phi(states) > 0).to(\n torch.int\n )\n stopping_timestep = (\n torch.sum(stopping_function_output_int, dim=0) - 1\n ) / (len(self.ts) - 1)\n stopping_timestep_vector = []\n\n s_vector = []\n t_vector = []\n for k, t in enumerate(self.ts):\n s_vector.append(\n torch.linspace(t, self.T, self.num_steps + 1 - k).to(self.ts.device)\n )\n t_vector.append(\n t * torch.ones(self.num_steps + 1 - k).to(self.ts.device)\n )\n if use_stopping_time:\n stopping_timestep_vector.append(\n stopping_timestep.unsqueeze(0).repeat(self.num_steps + 1 - k, 1)\n )\n s_vector = torch.cat(s_vector)\n t_vector = torch.cat(t_vector)\n if use_stopping_time:\n stopping_timestep_vector = torch.cat(stopping_timestep_vector, dim=0)\n M_evals_all = self.neural_sde.M(\n t_vector, s_vector, stopping_timestep_vector\n )\n derivative_M_evals_all = torch.nan_to_num(\n derivative_M(t_vector, s_vector, stopping_timestep_vector)\n )\n counter = 0\n for k, t in enumerate(self.ts):\n M_evals[k, k:, :, :, :] = M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :, :\n ]\n derivative_M_evals[k, k:, :, :, :] = derivative_M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :, :\n ]\n counter += self.num_steps + 1 - k\n else:\n M_evals_all = self.neural_sde.M(\n t_vector,\n s_vector,\n )\n derivative_M_evals_all = derivative_M(\n t_vector,\n s_vector,\n )\n counter = 0\n for k, t in enumerate(self.ts):\n M_evals[k, k:, :, :] = M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :\n ]\n derivative_M_evals[k, k:, :, :] = derivative_M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :\n ]\n counter += self.num_steps + 1 - k\n\n if use_stopping_time:\n least_squares_target_integrand_term_1 = torch.einsum(\n \"ijmkl,jml->ijmk\",\n M_evals,\n self.neural_sde.nabla_f(self.ts, states),\n )[:, :-1, :, :]\n else:\n least_squares_target_integrand_term_1 = torch.einsum(\n \"ijkl,jml->ijmk\",\n M_evals,\n self.neural_sde.nabla_f(self.ts, states),\n )[:, :-1, :, :]\n\n if use_stopping_time:\n M_nabla_b_term = (\n torch.einsum(\n \"ijmkl,jmln->ijmkn\",\n M_evals,\n self.neural_sde.nabla_b(self.ts, states),\n )\n - derivative_M_evals\n )\n least_squares_target_integrand_term_2 = -np.sqrt(\n self.lmbd\n ) * torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n else:\n M_nabla_b_term = torch.einsum(\n \"ijkl,jmln->ijmkn\",\n M_evals,\n self.neural_sde.nabla_b(self.ts, states),\n ) - derivative_M_evals.unsqueeze(2)\n least_squares_target_integrand_term_2 = -np.sqrt(\n self.lmbd\n ) * torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n\n least_squares_target_integrand_term_3 = -torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n 
torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n\n if use_stopping_time:\n M_evals_final = M_evals[:, -1, :, :, :]\n least_squares_target_terminal = torch.einsum(\n \"imkl,ml->imk\",\n M_evals_final,\n self.neural_sde.nabla_g(states[-1, :, :]),\n )\n else:\n M_evals_final = M_evals[:, -1, :, :]\n least_squares_target_terminal = torch.einsum(\n \"ikl,ml->imk\",\n M_evals_final,\n self.neural_sde.nabla_g(states[-1, :, :]),\n )\n\n if use_stopping_time:\n least_squares_target_integrand_term_1_times_dt = (\n least_squares_target_integrand_term_1\n * fractional_timesteps.unsqueeze(0).unsqueeze(3)\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = (\n least_squares_target_integrand_term_2\n * torch.sqrt(fractional_timesteps).unsqueeze(0).unsqueeze(3)\n )\n least_squares_target_integrand_term_3_times_dt = (\n least_squares_target_integrand_term_3\n * fractional_timesteps.unsqueeze(0).unsqueeze(3)\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = (\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2).unsqueeze(0)\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = (\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2)\n )\n least_squares_target_integrand_term_3_times_dt = (\n least_squares_target_integrand_term_3 * dts.unsqueeze(1).unsqueeze(2)\n )\n\n cumsum_least_squares_term_1 = torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=1\n )\n cumsum_least_squares_term_2 = torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=1\n )\n cumsum_least_squares_term_3 = torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=1\n )\n\n least_squares_target = (\n cumsum_least_squares_term_1\n + cumsum_least_squares_term_2\n + cumsum_least_squares_term_3\n + least_squares_target_terminal\n )\n\n if use_stopping_time:\n control_learned = -unsqueezed_stop_indicators * torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -unsqueezed_stop_indicators * torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n least_squares_target,\n )\n else:\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n least_squares_target,\n )\n\n if use_stopping_time:\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (torch.sum(stop_indicators))\n else:\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM_adjoint\":\n nabla_f_evals = self.neural_sde.nabla_f(self.ts, states)\n nabla_b_evals = self.neural_sde.nabla_b(self.ts, states)\n nabla_g_evals = self.neural_sde.nabla_g(states[-1, :, :])\n\n # print(f'nabla_b_evals.shape: {nabla_b_evals.shape}')\n\n a_vectors = torch.zeros_like(states)\n a = nabla_g_evals\n a_vectors[-1, :, :] = a\n\n for k in range(1,len(self.ts)):\n # a += self.dt * (nabla_f_evals[-1-k, :, :] + torch.einsum(\"mkl,ml->mk\", nabla_b_evals[-1-k, :, :, :], a))\n a += self.dt * ((nabla_f_evals[-1-k, :, :] + nabla_f_evals[-k, :, :]) / 2 + torch.einsum(\"mkl,ml->mk\", (nabla_b_evals[-1-k, :, :, :] + nabla_b_evals[-k, :, :, :]) / 2, a))\n a_vectors[-1-k, :, :] = a\n\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n 
)\n control_target = -torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n a_vectors,\n )\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n elif algorithm == \"cross_entropy\":\n learned_controls = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n integrand_term_1 = -(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * controls, dim=2\n )\n integrand_term_2 = (1 / (2 * self.lmbd)) * torch.sum(\n learned_controls**2, dim=2\n )[:-1, :]\n deterministic_integrand = integrand_term_1 + integrand_term_2\n stochastic_integrand = -np.sqrt(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * noises, dim=2\n )\n\n if use_stopping_time:\n deterministic_integrand_times_dt = (\n deterministic_integrand * fractional_timesteps\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n fractional_timesteps\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n deterministic_integrand_times_dt = (\n deterministic_integrand * dts.unsqueeze(1)\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n dts\n ).unsqueeze(1)\n\n deterministic_term = torch.sum(deterministic_integrand_times_dt, dim=0)\n stochastic_term = torch.sum(stochastic_integrand_times_sqrt_dt, dim=0)\n\n objective = torch.mean((deterministic_term + stochastic_term) * weight)\n\n elif (\n algorithm == \"variance\"\n or algorithm == \"log-variance\"\n or algorithm == \"moment\"\n ):\n learned_controls = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n integrand_term_1 = -(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * controls, dim=2\n )\n integrand_term_2 = (1 / (2 * self.lmbd)) * torch.sum(\n learned_controls**2, dim=2\n )[:-1, :]\n integrand_term_3 = (\n -(1 / self.lmbd) * self.neural_sde.f(self.ts[0], states)[:-1, :]\n )\n deterministic_integrand = (\n integrand_term_1 + integrand_term_2 + integrand_term_3\n )\n stochastic_integrand = -np.sqrt(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * noises, dim=2\n )\n if use_stopping_time:\n deterministic_integrand = (\n deterministic_integrand * stop_indicators[:-1, :]\n )\n stochastic_integrand = stochastic_integrand * stop_indicators[:-1, :]\n\n if use_stopping_time:\n deterministic_integrand_times_dt = (\n deterministic_integrand * fractional_timesteps\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n fractional_timesteps\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n deterministic_integrand_times_dt = (\n deterministic_integrand * dts.unsqueeze(1)\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n dts\n ).unsqueeze(1)\n\n deterministic_term = torch.sum(deterministic_integrand_times_dt, dim=0)\n stochastic_term = torch.sum(stochastic_integrand_times_sqrt_dt, dim=0)\n g_term = -(1 / self.lmbd) * self.neural_sde.g(states[-1, :, :])\n if algorithm == \"log-variance\":\n sum_terms = deterministic_term + stochastic_term + g_term\n elif algorithm == \"variance\":\n sum_terms = torch.exp(deterministic_term + stochastic_term + g_term)\n elif algorithm == \"moment\":\n sum_terms = deterministic_term + stochastic_term + g_term + self.y0\n\n if add_weights:\n weight_2 = weight\n else:\n weight_2 = torch.ones_like(weight)\n if algorithm == \"log-variance\" or algorithm == \"variance\":\n objective = (\n len(sum_terms)\n / (len(sum_terms) - 1)\n * (\n torch.mean(sum_terms**2 * 
weight_2)\n - torch.mean(sum_terms * weight_2) ** 2\n )\n )\n elif algorithm == \"moment\":\n objective = torch.mean(sum_terms**2 * weight_2)\n\n if compute_L2_error:\n if algorithm == \"rel_entropy\":\n target_control = optimal_control(self.ts, states, t_is_tensor=True)[\n :-1, :, :\n ].detach()\n else:\n target_control = optimal_control(self.ts, states, t_is_tensor=True)\n if algorithm != \"rel_entropy\":\n learned_control = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n norm_sqd_diff = torch.sum(\n (target_control - learned_control) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n / (target_control.shape[0] * target_control.shape[1])\n )\n else:\n norm_sqd_diff = None\n\n if compute_control_objective:\n ctrl_loss_mean, ctrl_loss_std_err, trajectory = self.control_objective(\n batch_size, total_n_samples=total_n_samples\n )\n else:\n ctrl_loss_mean = None\n ctrl_loss_std_err = None\n trajectory = None\n\n if verbose:\n # To print amount of memory used in GPU\n nvidia_smi.nvmlInit()\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)\n # card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate\n info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n print(\"Total memory:\", info.total / 1048576, \"MiB\")\n print(\"Free memory:\", info.free / 1048576, \"MiB\")\n print(\"Used memory:\", info.used / 1048576, \"MiB\")\n nvidia_smi.nvmlShutdown()\n\n return (\n objective,\n norm_sqd_diff,\n ctrl_loss_mean,\n ctrl_loss_std_err,\n trajectory,\n torch.mean(weight),\n torch.std(weight),\n stop_indicators,\n )" }, { "identifier": "define_variables", "path": "SOC_matching/experiment_settings/settings.py", "snippet": "def define_variables(cfg, ts):\n if (\n cfg.method.setting == \"OU_quadratic_easy\"\n or cfg.method.setting == \"OU_quadratic_hard\"\n ):\n if cfg.method.d == 2:\n x0 = torch.tensor([0.4, 0.6]).to(cfg.method.device)\n else:\n x0 = 0.5 * torch.randn(cfg.method.d).to(cfg.method.device)\n print(f\"x0: {x0}\")\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n if cfg.method.setting == \"OU_quadratic_hard\":\n A = 1.0 * torch.eye(cfg.method.d).to(cfg.method.device)\n P = 1.0 * torch.eye(cfg.method.d).to(cfg.method.device)\n Q = 0.5 * torch.eye(cfg.method.d).to(cfg.method.device)\n elif cfg.method.setting == \"OU_quadratic_easy\":\n A = 0.2 * torch.eye(cfg.method.d).to(cfg.method.device)\n P = 0.2 * torch.eye(cfg.method.d).to(cfg.method.device)\n Q = 0.1 * torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, A=A, P=P, Q=Q)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, A=A, P=P, Q=Q\n )\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"OU_linear\":\n x0 = torch.zeros(cfg.method.d).to(cfg.method.device)\n nu = 0.1\n xi = nu * torch.randn(cfg.method.d, cfg.method.d).to(cfg.method.device)\n omega = torch.ones(cfg.method.d).to(cfg.method.device)\n A = -torch.eye(cfg.method.d).to(cfg.method.device) + xi\n sigma = torch.eye(cfg.method.d).to(cfg.method.device) + xi\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, omega=omega, A=A)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, omega=omega, A=A\n )\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"double_well\":\n print(f\"double_well\")\n x0 = 
torch.zeros(cfg.method.d).to(cfg.method.device)\n\n kappa_i = 5\n nu_i = 3\n kappa = torch.ones(cfg.method.d).to(cfg.method.device)\n nu = torch.ones(cfg.method.d).to(cfg.method.device)\n kappa[0] = kappa_i\n kappa[1] = kappa_i\n kappa[2] = kappa_i\n nu[0] = nu_i\n nu[1] = nu_i\n nu[2] = nu_i\n\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, kappa=kappa, nu=nu)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, kappa=kappa, nu=nu\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"molecular_dynamics\":\n print(f\"molecular_dynamics\")\n x0 = -torch.ones(cfg.method.d).to(cfg.method.device)\n\n kappa = torch.ones(cfg.method.d).to(cfg.method.device)\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(\n cfg,\n ts,\n x0,\n sigma=sigma,\n kappa=kappa,\n )\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg,\n ts,\n x0,\n u_warm_start,\n sigma=sigma,\n kappa=kappa,\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"multiagent_8\":\n print(f\"multiagent_8\")\n x0 = torch.tensor(\n [\n -4.0,\n 4.5,\n -7.0,\n 4.5,\n -4.0,\n 1.5,\n -7.0,\n 1.5,\n -4.0,\n -1.5,\n -7.0,\n -1.5,\n -4.0,\n -4.5,\n -7.0,\n -4.5,\n ]\n ).to(cfg.method.device)\n\n g_center = torch.tensor(\n [\n 4.0,\n 4.5,\n 7.0,\n 4.5,\n 4.0,\n 1.5,\n 7.0,\n 1.5,\n 4.0,\n -1.5,\n 7.0,\n -1.5,\n 4.0,\n -4.5,\n 7.0,\n -4.5,\n ]\n ).to(cfg.method.device)\n g_coeff = 2.00\n f_coeff = 0.05\n\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(\n cfg,\n ts,\n x0,\n sigma=sigma,\n g_center=g_center,\n g_coeff=g_coeff,\n f_coeff=f_coeff,\n )\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg,\n ts,\n x0,\n u_warm_start,\n sigma=sigma,\n g_center=g_center,\n g_coeff=g_coeff,\n f_coeff=f_coeff,\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start" } ]
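Among the helpers quoted in this context list, compute_EMA is small enough to exercise directly. The snippet below reruns the same logic on a made-up loss sequence to show the switch from a plain running average to an exponential moving average; the loss values are arbitrary.

import numpy as np

def compute_EMA(value, EMA_value, EMA_coeff=0.01, itr=0):
    # Same logic as SOC_matching.utils.compute_EMA quoted above: a plain
    # running average for the first 1/EMA_coeff iterations, an exponential
    # moving average with weight EMA_coeff afterwards.
    itr_avg = int(np.floor(1 / EMA_coeff))
    if itr == 0:
        return value
    elif itr <= itr_avg:
        return (value + itr * EMA_value) / (itr + 1)
    else:
        return EMA_coeff * value + (1 - EMA_coeff) * EMA_value

EMA_loss = 0.0
for itr, loss in enumerate([4.0, 2.0, 1.0, 0.5]):
    EMA_loss = compute_EMA(loss, EMA_loss, EMA_coeff=0.01, itr=itr)
# itr 0 returns the raw loss; later iterations average it with the history.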
import torch import sys import logging import os import time import json import hydra import traceback from tqdm.notebook import tqdm from omegaconf import DictConfig from SOC_matching.utils import ( get_folder_name, get_file_name, control_objective, save_results, compute_EMA, normalization_constant, ) from SOC_matching.method import ( SOC_Solver, ) from SOC_matching.experiment_settings.settings import define_variables
11,353
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory. log = logging.getLogger(__name__) @hydra.main(version_base=None, config_path="configs", config_name="soc") def main(cfg: DictConfig): logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory. log = logging.getLogger(__name__) @hydra.main(version_base=None, config_path="configs", config_name="soc") def main(cfg: DictConfig): logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg)
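The entry-point code above reads only a handful of config fields (device selection, seed, horizon T, number of steps) before building the time grid. A small sketch of exercising that setup with a hand-built OmegaConf config, instead of launching through @hydra.main(config_path="configs", config_name="soc"), is below; the field values are placeholders, not the experiment's defaults.

import torch
from omegaconf import OmegaConf

# Minimal sketch with placeholder values; real runs load the hydra config instead.
cfg = OmegaConf.create({
    "method": {"use_gpu": False, "device_number": 0, "seed": 0,
               "T": 1.0, "num_steps": 200, "device": None}
})
cfg.method.device = "cuda:" + str(cfg.method.device_number) if cfg.method.use_gpu else "cpu"
torch.manual_seed(cfg.method.seed)
ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device)
print(ts.shape)  # torch.Size([201])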
file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations)
1
2023-12-04 20:26:18+00:00
16k
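A record like the one above pairs a truncated in-file prefix with a list of retrieved cross-file snippets, a gold snippet index, and the held-out next line. A minimal sketch of turning such a record into a next-line completion prompt follows; the dictionary keys used here ("context", "gold_snippet_index", "import_statement", "cropped_code", "next_line") are assumed names for the fields printed above, "identifier"/"path"/"snippet" mirror the context entries as shown, and the prompt layout itself is illustrative only.

# Sketch only: key names and prompt layout are assumptions for illustration.
def gold_context(record: dict) -> str:
    """Return the retrieved cross-file snippet that the gold index points at."""
    entry = record["context"][record["gold_snippet_index"]]
    return "# from {} ({})\n{}".format(entry["path"], entry["identifier"], entry["snippet"])

def build_prompt(record: dict) -> tuple:
    """Assemble (prompt, target) for next-line completion evaluation."""
    prompt = (gold_context(record) + "\n\n"
              + record["import_statement"] + "\n"
              + record["cropped_code"])
    return prompt, record["next_line"]   # a model is scored on predicting next_line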
yiwenlu66/learning-qp
src/modules/qp_unrolled_network.py
[ { "identifier": "QPSolver", "path": "src/modules/qp_solver.py", "snippet": "class QPSolver(nn.Module):\n \"\"\"\n Solve QP problem:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n where x in R^n, b in R^m.\n \"\"\"\n def __init__(self, device, n, m,\n P=None, Pinv=None, H=None,\n alpha=1, beta=1,\n preconditioner=None, warm_starter=None,\n is_warm_starter_trainable=False,\n keep_X=True,\n symmetric_constraint=False,\n buffered=False,\n ):\n \"\"\"\n Initialize the QP solver.\n\n device: PyTorch device\n\n n, m: dimensions of decision variable x and constraint vector b\n\n P, Pinv, H: Optional matrices that define the QP. If not provided, must be supplied during forward pass. At most one of P and Pinv can be specified.\n\n alpha, beta: Parameters of the PDHG algorithm\n\n preconditioner: Optional preconditioner module\n\n warm_starter: Optional warm start module\n\n is_warm_starter_trainable: Flag for training the warm starter\n\n keep_X: Flag for keeping the primal-dual variable history\n\n symmetric_constraint: Flag for making the inequality constraint symmetric; when True, the constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0.\n\n buffered: Flag for indicating whether the problem is modeled with the buffer variable \\epsilon. When True, it is assumed that the first (n-1) decision variables are the original x, and the last decision variable is \\epsilon; in this case, if symmetric constraint is enabled, then the projection is done as follows:\n 1. Project epsilon to [0, +\\infty)\n 2. Project H_x x + b_x to [-1 - eps, 1 + eps]\n\n Note: Assumes that H is full column rank when m >= n, and full row rank otherwise.\n \"\"\"\n super().__init__()\n self.device = device\n self.n = n\n self.m = m\n create_tensor = lambda t: (torch.tensor(t, dtype=torch.float, device=device).unsqueeze(0) if t is not None else None) if type(t) != torch.Tensor else t.unsqueeze(0)\n assert (P is None) or (Pinv is None), \"At most one of P and Pinv can be specified\"\n self.bP = create_tensor(P) # (1, n, n)\n self.bPinv = create_tensor(Pinv) # (1, n, n)\n self.bH = create_tensor(H) # (1, m, n)\n self.alpha = alpha\n self.beta = beta\n if preconditioner is None:\n # Use dummy preconditioner which gives D=I/beta\n self.preconditioner = Preconditioner(device, n, m, P=P, Pinv=Pinv, H=H, beta=beta, dummy=True)\n else:\n self.preconditioner = preconditioner\n self.warm_starter = warm_starter\n self.is_warm_starter_trainable = is_warm_starter_trainable\n self.keep_X = keep_X\n self.symmetric_constraint = symmetric_constraint\n self.buffered = buffered\n\n self.bIm = torch.eye(m, device=device).unsqueeze(0)\n self.X0 = torch.zeros((1, 2 * self.m), device=self.device)\n\n # If P, H are constant, we can pre-compute the transformation from z to x\n if self.bP is not None and self.bH is not None:\n self.get_sol = self.get_sol_transform(self.bP, self.bH)\n elif self.bPinv is not None and self.bH is not None:\n self.get_sol = self.get_sol_transform(self.bH, bPinv=self.bPinv)\n else:\n self.get_sol = None\n\n # If possible, cache intermediate results in the computation of the affine transform used for each PDHG iteration\n if (P is not None or Pinv is not None) and H is not None and preconditioner is None:\n self.cache_keys = [\"D\", \"tD\", \"tDD\", \"A\"]\n else:\n self.cache_keys = []\n self.cache = {}\n\n def get_sol_transform(self, H, bP=None, bPinv=None):\n \"\"\"\n Computes the transformation from dual variable z to primal variable x.\n\n H: Constraint matrix\n bP, bPinv: Either the matrix P or 
its inverse. Exactly one must be specified. Specifying Pinv can reduce number of linear solves.\n\n Returns: Function that performs the transformation\n \"\"\"\n bH = self.bH if self.bH is not None else H\n if self.m >= self.n:\n return lambda z, q, b: bmv(pinv(bH), z - b)\n else:\n bP_param = bP if bP is not None else bPinv\n op = solve if bP is not None else bma\n def get_sol(z, q, b):\n t = lambda bM: bM.transpose(-1, -2)\n bPinvHt = op(bP_param, t(bH))\n Mt = solve(t(bH @ bPinvHt), t(bPinvHt))\n M = t(Mt)\n bPinvq = op(bP_param, q)\n return bmv(M @ bH, bPinvq) - bPinvq + bmv(M, z - b)\n return get_sol\n\n def get_AB(self, q, b, H=None, P=None, Pinv=None):\n \"\"\"\n Computes matrices A and B used in the PDHG iterations.\n\n q, b: Coefficients in the objective and constraint\n H, P, Pinv: Matrix H, and (either the matrix P or its inverse). Must be specified if not initialized. Specifying Pinv can reduce number of linear solves.\n\n Returns: Matrices A and B\n \"\"\"\n\n def _lookup_or_compute(keys, compute_fn):\n \"\"\"Lookup variable(s) from cache or compute them if not available.\n\n keys: either a variable name (str), or a list of variable names\n compute_fn: function that computes the variable(s) if not available in cache; returns a single value if keys is a string, or a tuple of values if keys is a list\n \"\"\"\n is_single = (type(keys) == str)\n if is_single:\n keys = [keys]\n if not all([key in self.cache for key in keys]):\n values = compute_fn()\n if is_single:\n values = (values,)\n for key, value in zip(keys, values):\n if key in self.cache_keys:\n self.cache[key] = value\n else:\n values = tuple([self.cache[key] for key in keys])\n return values if not is_single else values[0]\n\n # q: (bs, n), b: (bs, m)\n if self.bP is not None or self.bPinv is not None:\n if self.bP is not None:\n bP_param = self.bP\n P_is_inv = False\n else:\n bP_param = self.bPinv\n P_is_inv = True\n else:\n if P is not None:\n bP_param = P\n P_is_inv = False\n else:\n bP_param = Pinv\n P_is_inv = True\n op = bsolve if not P_is_inv else bma\n\n bH = self.bH if self.bH is not None else H\n D, tD = _lookup_or_compute([\"D\", \"tD\"], lambda: self.preconditioner(q, b, bP_param, H, input_P_is_inversed=P_is_inv, output_tD_is_inversed=False)) # (bs, m, m) or (1, m, m)\n mu = bmv(tD, bmv(bH, op(bP_param, q)) - b) # (bs, m)\n tDD = _lookup_or_compute(\"tDD\", lambda: tD @ D)\n\n A = _lookup_or_compute(\"A\", lambda:\n torch.cat([\n torch.cat([tDD, tD], 2),\n torch.cat([-2 * self.alpha * tDD + self.bIm, self.bIm - 2 * self.alpha * tD], 2),\n ], 1) # (bs, 2m, 2m)\n )\n B = torch.cat([\n mu,\n -2 * self.alpha * mu\n ], 1) # (bs, 2m)\n return A, B\n\n def compute_residuals(self, x, z, u, q, b, P=None, H=None, Pinv=None):\n \"\"\"\n Computes the primal and dual residuals.\n\n x, z: Primal variables\n u: Dual variable\n q, b: Coefficients in the objective and constraint\n P, H, Pinv: Optional matrices defining the QP. 
Must be provided if not initialized.\n\n Returns: Primal and dual residuals\n \"\"\"\n # Determine effective P and H matrices\n if self.bP is not None or self.bPinv is not None:\n if self.bP is not None:\n eff_P = self.bP\n P_is_inv = False\n else:\n eff_P = self.bPinv\n P_is_inv = True\n else:\n if P is not None:\n eff_P = P\n P_is_inv = False\n else:\n eff_P = Pinv\n P_is_inv = True\n\n if self.bH is not None:\n eff_H = self.bH\n else:\n eff_H = H\n\n # Compute primal residual: Hx + b - z\n primal_residual = bmv(eff_H, x) + b - z\n\n # Determine the operation for multiplying with P or its inverse\n op = bsolve if P_is_inv else bmv\n\n # Compute dual residual: Px + q + H'u\n dual_residual = op(eff_P, x) + q + bmv(eff_H.transpose(-1, -2), u)\n\n return primal_residual, dual_residual\n\n\n def forward(\n self, q, b,\n P=None, H=None, Pinv=None,\n iters=1000,\n only_last_primal=True,\n return_residuals=False\n ):\n \"\"\"\n Solves the QP problem using PDHG.\n\n q, b: Coefficients in the objective and constraint\n P, H, Pinv: Optional matrices defining the QP, i.e., matrix H, and (either the matrix P or its inverse). Must be provided if not initialized. Using Pinv is more efficient in learned setting.\n iters: Number of PDHG iterations\n only_last_primal: Flag for returning only the last primal solution (when True, primal_sols is (bs, 1, n); otherwise (bs, iters + 1, n))\n return_residuals: Flag for returning residuals\n\n Returns: History of primal-dual variables, primal solutions, and optionally residuals of the last iteration\n \"\"\"\n # q: (bs, n), b: (bs, m)\n bs = q.shape[0]\n if self.keep_X:\n Xs = torch.zeros((bs, iters + 1, 2 * self.m), device=self.device)\n else:\n Xs = None\n primal_sols = torch.zeros((bs, (iters if not only_last_primal else 0) + 1, self.n), device=self.device)\n if self.warm_starter is not None:\n with torch.set_grad_enabled(self.is_warm_starter_trainable):\n qd, bd, Pd, Hd, Pinvd = map(lambda t: t.detach() if t is not None else None, [q, b, P, H, Pinv])\n P_param_to_ws = Pd if Pd is not None else Pinvd\n self.X0 = self.warm_starter(qd, bd, P_param_to_ws, Hd)\n get_sol = self.get_sol if self.get_sol is not None else self.get_sol_transform(H, P, Pinv)\n if self.keep_X:\n Xs[:, 0, :] = self.X0.clone()\n if not only_last_primal:\n primal_sols[:, 0, :] = get_sol(self.X0[:, self.m:], q, b)\n X = self.X0\n A, B = self.get_AB(q, b, H, P, Pinv)\n for k in range(1, iters + 1):\n # PDHG update\n X = bmv(A, X) + B # (bs, 2m)\n if not self.symmetric_constraint:\n # Project to [0, +\\infty)\n F.relu(X[:, self.m:], inplace=True)\n else:\n if not self.buffered:\n # Project to [-1, 1]\n projected = torch.clamp(X[:, self.m:], -1, 1)\n X = torch.cat((X[:, :self.m], projected), dim=1)\n else:\n # Hybrid projection: epsilon to [0, +\\infty), the rest decision variables to [-1 - eps, 1 + eps]\n # Project epsilon\n F.relu(X[:, -1:], inplace=True)\n # Project the rest variables\n projected = torch.clamp(X[:, self.m:-1], -1 - X[:, -1:], 1 + X[:, -1:])\n # Concatenate\n X = torch.cat((X[:, :self.m], projected, X[:, -1:]), dim=1)\n if self.keep_X:\n Xs[:, k, :] = X.clone()\n if not only_last_primal:\n primal_sols[:, k, :] = get_sol(X[:, self.m:], q, b)\n\n if only_last_primal:\n primal_sols[:, 0, :] = get_sol(X[:, self.m:], q, b)\n\n # Compute residuals for the last step if the flag is set\n if return_residuals:\n x_last = primal_sols[:, -1, :]\n z_last = Xs[:, -1, self.m:]\n u_last = Xs[:, -1, :self.m]\n primal_residual, dual_residual = self.compute_residuals(x_last, z_last, u_last, q, 
b, P, H, Pinv)\n return Xs, primal_sols, (primal_residual, dual_residual)\n else:\n return Xs, primal_sols" }, { "identifier": "WarmStarter", "path": "src/modules/warm_starter.py", "snippet": "class WarmStarter(nn.Module):\n def __init__(self, device, n, m, fixed_P=True, fixed_H=True):\n super().__init__()\n self.device = device\n self.n = n\n self.m = m\n self.fixed_P = fixed_P\n self.fixed_H = fixed_H\n num_in = n + m\n if not fixed_P:\n num_in += n * (n + 1) // 2\n if not fixed_H:\n num_in += n * m\n num_out = 2 * m\n num_hidden = max(num_in, num_out)\n self.net = nn.Sequential(\n nn.Linear(num_in, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_out),\n ).to(device=device)\n\n def forward(self, q, b, P=None, H=None):\n \"\"\"The P argument can be either P or inv(P) in the original PDHG formulation, as long as consistent.\"\"\"\n net_input = [q, b]\n if not self.fixed_P:\n net_input.append(vectorize_upper_triangular(P))\n if not self.fixed_H:\n net_input.append(H.flatten(start_dim=-2))\n net_input_t = torch.cat(net_input, 1)\n X = self.net(net_input_t)\n return X" }, { "identifier": "make_psd", "path": "src/utils/torch_utils.py", "snippet": "def make_psd(x, min_eig=0.1):\n \"\"\"Assume x is (bs, N*(N+1)/2), create (bs, N, N) batch of PSD matrices using Cholesky.\"\"\"\n bs, n_elem = x.shape\n N = (int(np.sqrt(1 + 8 * n_elem)) - 1) // 2\n cholesky_diag_index = torch.arange(N, dtype=torch.long) + 1\n cholesky_diag_index = (cholesky_diag_index * (cholesky_diag_index + 1)) // 2 - 1 # computes the indices of the future diagonal elements of the matrix\n elem = x.clone()\n elem[:, cholesky_diag_index] = np.sqrt(min_eig) + F.softplus(elem[:, cholesky_diag_index])\n tril_indices = torch.tril_indices(row=N, col=N, offset=0) # Collection that contains the indices of the non-zero elements of a lower triangular matrix\n cholesky = torch.zeros(size=(bs, N, N), dtype=torch.float, device=elem.device) #initialize a square matrix to zeros\n cholesky[:, tril_indices[0], tril_indices[1]] = elem # Assigns the elements of the vector to their correct position in the lower triangular matrix\n return cholesky @ cholesky.transpose(1, 2)" }, { "identifier": "interpolate_state_dicts", "path": "src/utils/torch_utils.py", "snippet": "def interpolate_state_dicts(state_dict_1, state_dict_2, weight):\n return {\n key: (1 - weight) * state_dict_1[key] + weight * state_dict_2[key] for key in state_dict_1.keys()\n }" }, { "identifier": "mpc2qp", "path": "src/utils/mpc_utils.py", "snippet": "def mpc2qp(n_mpc, m_mpc, N, A, B, Q, R, x_min, x_max, u_min, u_max, x0, x_ref, normalize=False, Qf=None):\n \"\"\"\n Converts Model Predictive Control (MPC) problem parameters into Quadratic Programming (QP) form.\n\n Parameters:\n - n_mpc (int): Dimension of the state space.\n - m_mpc (int): Dimension of the input space.\n - N (int): Prediction horizon.\n - A (torch.Tensor): State transition matrix, shape (n_mpc, n_mpc).\n - B (torch.Tensor): Control input matrix, shape (n_mpc, m_mpc).\n - Q (torch.Tensor): State cost matrix, shape (n_mpc, n_mpc).\n - R (torch.Tensor): Control cost matrix, shape (m_mpc, m_mpc).\n - x_min (float): Lower state bounds.\n - x_max (float): Upper state bounds.\n - u_min (float): Lower control bounds.\n - u_max (float): Upper control bounds.\n - x0 (torch.Tensor): Initial state, shape (batch_size, n_mpc).\n - x_ref (torch.Tensor): Reference state, shape (batch_size, n_mpc).\n - normalize (bool): Whether to normalize the control actions. 
If set to True, the solution of the QP problem will be rescaled actions within range [-1, 1].\n - Qf (torch.Tensor, optional): Terminal state cost matrix, shape (n_mpc, n_mpc).\n\n Returns:\n - n (int): Number of decision variables.\n - m (int): Number of constraints.\n - P (torch.Tensor): QP cost matrix, shape (n, n).\n - q (torch.Tensor): QP cost vector, shape (batch_size, n).\n - H (torch.Tensor): Constraint matrix, shape (m, n).\n - b (torch.Tensor): Constraint bounds, shape (batch_size, m).\n\n The converted QP problem is in form:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n\n Notes:\n - The function assumes that A, B, Q, R are single matrices, and x0 and x_ref are in batch.\n - All tensors are expected to be on the same device.\n \"\"\"\n bs = x0.shape[0]\n device = x0.device\n\n Ax0 = torch.cat([bmv((torch.linalg.matrix_power(A, k + 1)).unsqueeze(0), x0) for k in range(N)], 1) # (bs, N * n_mpc)\n m = 2 * (n_mpc + m_mpc) * N # number of constraints\n n = m_mpc * N # number of decision variables\n\n b = torch.cat([\n Ax0 - x_min,\n x_max - Ax0,\n -u_min * torch.ones((bs, n), device=device),\n u_max * torch.ones((bs, n), device=device),\n ], 1)\n\n XU = torch.zeros((N, n_mpc, N, m_mpc), device=device)\n for k in range(N):\n for j in range(k + 1):\n XU[k, :, j, :] = (torch.linalg.matrix_power(A, k - j) @ B)\n XU = XU.flatten(0, 1).flatten(1, 2) # (N * n_MPC, N * m_MPC)\n\n Q_kron = torch.kron(torch.eye(N, device=A.device), Q)\n if Qf is not None:\n # Adjust the last block of Q_kron to include Qf\n Q_kron[-n_mpc:, -n_mpc:] += Qf\n\n q = -2 * XU.t().unsqueeze(0) @ Q_kron.unsqueeze(0) @ (kron(torch.ones((bs, N, 1), device=device), x_ref.unsqueeze(-1)) - Ax0.unsqueeze(-1)) # (bs, N * m_MPC, 1)\n q = q.squeeze(-1) # (bs, N * m_MPC) = (bs, n)\n P = 2 * XU.t() @ Q_kron @ XU + 2 * kron(torch.eye(N, device=device), R) # (n, n)\n H = torch.cat([XU, -XU, torch.eye(n, device=device), -torch.eye(n, device=device)], 0) # (m, n)\n\n if normalize:\n # u = alpha * u_normalized + beta\n alpha = (u_max - u_min) / 2 * torch.ones((m_mpc,), device=device) # (m_MPC,)\n beta = (u_max + u_min) / 2 * torch.ones((m_mpc,), device=device) # (m_MPC,)\n Alpha = torch.diag_embed(alpha.repeat(N)) # (n, n)\n Beta = beta.repeat(N) # (n,)\n P_nom = Alpha @ P @ Alpha # (n,)\n q_nom = bmv(Alpha.unsqueeze(0), q + bmv(P, Beta).unsqueeze(0)) # (bs, n)\n H_nom = H @ Alpha # (m, n)\n b_nom = (H @ Beta).unsqueeze(0) + b # (bs, m)\n P, q, H, b = P_nom, q_nom, H_nom, b_nom\n\n return n, m, P, q, H, b" }, { "identifier": "scenario_robust_mpc", "path": "src/utils/mpc_utils.py", "snippet": "def scenario_robust_mpc(mpc_baseline_parameters, r):\n \"\"\"\n Scenario-based robust MPC with process noise handling and constraints.\n\n Inputs:\n - mpc_baseline_parameters: Dict containing A, B, Q, R, Qf, disturbance magnitude, state bounds, input bounds, etc.\n\n Output: Function mapping from x0 to u0.\n \"\"\"\n\n # Extract parameters\n A = mpc_baseline_parameters['A']\n B = mpc_baseline_parameters['B']\n Q = mpc_baseline_parameters['Q']\n R = mpc_baseline_parameters['R']\n n = mpc_baseline_parameters['n_mpc']\n m = mpc_baseline_parameters['m_mpc']\n Qf = mpc_baseline_parameters.get(\"terminal_coef\", 0.) 
* np.eye(n)\n A_scenarios = mpc_baseline_parameters.get(\"A_scenarios\", [A])\n B_scenarios = mpc_baseline_parameters.get(\"B_scenarios\", [B])\n w_scenarios = mpc_baseline_parameters.get(\"w_scenarios\", [np.zeros((n, 1))])\n x_min = mpc_baseline_parameters['x_min']\n x_max = mpc_baseline_parameters['x_max']\n u_min = mpc_baseline_parameters['u_min']\n u_max = mpc_baseline_parameters['u_max']\n\n # Define the model\n model = do_mpc.model.Model('discrete')\n\n # States, inputs, and noise variables\n x = model.set_variable('_x', 'x', shape=(n, 1))\n u = model.set_variable('_u', 'u', shape=(m, 1))\n w = model.set_variable('_p', 'w', shape=(n, 1)) # Process noise\n\n # Uncertain parameters\n Theta_A = model.set_variable('_p', 'Theta_A', shape=A.shape)\n Theta_B = model.set_variable('_p', 'Theta_B', shape=B.shape)\n\n # System dynamics including process noise\n model.set_rhs('x', Theta_A @ x + Theta_B @ u + w)\n\n # Setup model\n model.setup()\n\n # MPC controller\n mpc = do_mpc.controller.MPC(model)\n\n # MPC parameters\n setup_mpc = {\n 'n_horizon': mpc_baseline_parameters['N'],\n 'n_robust': 1, # Exponential growth, so only 1 is reasonable\n 't_step': 0.1,\n 'store_full_solution': True,\n }\n mpc.set_param(**setup_mpc)\n\n # Uncertain parameter scenarios\n mpc.set_uncertainty_values(\n Theta_A=np.array(A_scenarios),\n Theta_B=np.array(B_scenarios),\n w=np.array(w_scenarios),\n )\n\n # Constraints on states and inputs\n eps = 1e-3\n mpc.bounds['lower','_x', 'x'] = x_min + eps\n mpc.bounds['upper','_x', 'x'] = x_max - eps\n mpc.bounds['lower','_u', 'u'] = u_min\n mpc.bounds['upper','_u', 'u'] = u_max\n\n # Objective function\n mterm = (x - r).T @ Qf @ (x - r)\n lterm = (x - r).T @ Q @ (x - r) + u.T @ R @ u\n mpc.set_objective(mterm=mterm, lterm=lterm)\n\n # Setup MPC\n mpc.setup()\n\n # Control function\n def mpc_control(x0, is_active=True):\n if is_active:\n t = time.time()\n mpc.x0 = x0\n\n # Solve the MPC problem\n u0 = mpc.make_step(x0)\n\n return u0.squeeze(-1), time.time() - t\n else:\n return np.zeros((m,)), 0.\n\n return mpc_control" }, { "identifier": "tube_robust_mpc", "path": "src/utils/mpc_utils.py", "snippet": "def tube_robust_mpc(mpc_baseline_parameters, r):\n \"\"\"\n Tube-based robust MPC with process noise handling and constraints.\n\n Inputs:\n - mpc_baseline_parameters: Dict containing A, B, Q, R, Qf, disturbance magnitude, state bounds, input bounds, etc.\n\n Output: Function mapping from x0 to u0.\n\n Reference: https://github.com/martindoff/DC-TMPC/; we only consider the case of LTI system (so that there is no successive linearization and no A2, B2).\n \"\"\"\n # Extract parameters\n A = mpc_baseline_parameters['A']\n B = mpc_baseline_parameters['B']\n Q = mpc_baseline_parameters['Q']\n R = mpc_baseline_parameters['R']\n n = mpc_baseline_parameters['n_mpc']\n m = mpc_baseline_parameters['m_mpc']\n Qf = mpc_baseline_parameters.get(\"terminal_coef\", 0.) 
* np.eye(n)\n N = mpc_baseline_parameters['N']\n x_min = mpc_baseline_parameters['x_min']\n x_max = mpc_baseline_parameters['x_max']\n u_min = mpc_baseline_parameters['u_min']\n u_max = mpc_baseline_parameters['u_max']\n max_disturbance_per_dim = mpc_baseline_parameters.get('max_disturbance_per_dim', 0)\n\n # Define optimization problem\n N_ver = 2 ** n # number of vertices\n\n # Optimization variables\n theta = cp.Variable(N + 1) # cost\n u = cp.Variable((m, N)) # input\n x_low = cp.Variable((n, N + 1)) # state (lower bound)\n x_up = cp.Variable((n, N + 1)) # state (upper bound)\n x_ = {} # create dictionary for 3D variable\n ws = {} # Each item is a noise vector corresponding to a vertex\n for l in range(N_ver):\n x_[l] = cp.Expression\n ws[l] = np.zeros((n,))\n\n # Parameters (value set at run time)\n x0 = cp.Parameter(n)\n\n # Define blockdiag matrices for page-wise matrix multiplication\n A_ = block_diag(*([A] * N))\n B_ = block_diag(*([B] * N))\n\n # Objective\n objective = cp.Minimize(cp.sum(theta))\n\n # Constraints\n constr = []\n\n # Assemble vertices\n for l in range(N_ver):\n # Convert l to binary string\n l_bin = bin(l)[2:].zfill(n)\n # Map binary string to lows and ups\n mapping_str_to_xs = lambda c: x_low if c == '0' else x_up\n mapping_str_to_w = lambda c: -max_disturbance_per_dim if c == '0' else max_disturbance_per_dim\n xs = map(mapping_str_to_xs, l_bin)\n w = np.array(list(map(mapping_str_to_w, l_bin))) # (n,) array\n x_[l] = cp.vstack([x[i, :] for (i, x) in enumerate(xs)])\n ws[l] = w\n\n for l in range(N_ver):\n # Define some useful variables\n x_r = cp.reshape(x_[l][:, :-1], (n * N, 1))\n u_r = cp.reshape(u, (m * N, 1))\n A_x = cp.reshape(A_ @ x_r, ((n, N)))\n B_u = cp.reshape(B_ @ u_r, (n, N))\n\n # SOC objective constraints\n for i in range(N):\n constr += [\n theta[i] >= cp.quad_form(x_[l][:, i] - r, Q) + cp.quad_form(u[:, i], R)\n ]\n\n constr += [\n theta[-1] >= cp.quad_form(x_[l][:, -1] - r, Qf)\n ]\n\n # Input constraints\n constr += [u >= u_min,\n u <= u_max]\n\n # Tube\n constr += [\n x_low[:, 1:] <= A_x + B_u + np.expand_dims(ws[l], -1)\n ]\n\n constr += [\n x_up[:, 1:] >= A_x + B_u + np.expand_dims(ws[l], -1)\n ]\n\n # State constraints\n constr += [\n x_low[:, :-1] >= x_min,\n x_up[:, :-1] >= x_min,\n x_up[:, :-1] <= x_max,\n x_low[:, :-1] <= x_max,\n x_low[:, 0] == x0,\n x_up[:, 0] == x0,\n ]\n\n # Define problem\n problem = cp.Problem(objective, constr)\n\n # Control function\n def mpc_control(x0_current, is_active=True):\n if is_active:\n t = time.time()\n x0.value = x0_current\n try:\n problem.solve(solver=cp.MOSEK, verbose=True, mosek_params={'MSK_IPAR_NUM_THREADS': 1})\n if u.value is not None:\n u0 = u.value[:, 0]\n else:\n # No solution, use default value\n warnings.warn(\"Tube MPC infeasible\")\n u0 = np.zeros((m,))\n except cp.error.SolverError:\n # solver failed, use default value\n warnings.warn(\"MOSEK failure\")\n u0 = np.zeros((m,))\n return u0, time.time() - t\n else:\n return np.zeros((m,)), 0.\n\n return mpc_control" }, { "identifier": "osqp_oracle", "path": "src/utils/osqp_utils.py", "snippet": "def osqp_oracle(q, b, P, H, return_iter_count=False, max_iter=1000):\n sol, iter_count = osqp_solve_qp_guarantee_return(\n P=P, q=q, G=-H, h=b,\n A=None, b=None, lb=None, ub=None,\n max_iter=max_iter, eps_abs=1e-10, eps_rel=1e-10,eps_prim_inf=1e-10, eps_dual_inf=1e-10, verbose=False,\n )\n if not return_iter_count:\n return sol\n else:\n return sol, iter_count" }, { "identifier": "np_batch_op", "path": "src/utils/np_batch_op.py", "snippet": "def 
np_batch_op(f, *arrays, max_workers=int(os.environ.get(\"MAX_CPU_WORKERS\", 8))):\n \"\"\"\n Applies a function in a batch operation on multiple arrays, possibly in parallel, handling multiple return values.\n If the function 'f' returns a single value, the function returns a single concatenated value instead of a tuple.\n\n Parameters:\n f (callable): The function to apply. Can return multiple values.\n arrays (list of np.ndarray or scipy.sparse.csc_matrix): Arrays on which the function is to be applied.\n\n Returns:\n np.ndarray or tuple: A concatenated array if 'f' returns a single value, otherwise a tuple of concatenated arrays.\n \"\"\"\n get_bs = lambda arr: 1 if type(arr) == scipy.sparse.csc_matrix else arr.shape[0]\n bs = max([get_bs(arr) for arr in arrays])\n _worker.f = f\n _worker.arrays = arrays\n\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n all_results = list(executor.map(_worker, range(bs)))\n\n processed_results = []\n for i in range(len(all_results[0])):\n results = [result[i] for result in all_results]\n if isinstance(results[0], np.ndarray):\n processed_result = np.concatenate([np.expand_dims(arr, 0) for arr in results], 0)\n else:\n processed_result = np.array(results)\n processed_results.append(processed_result)\n\n # Return a single value if there's only one result, otherwise return a tuple\n return processed_results[0] if len(processed_results) == 1 else tuple(processed_results)" } ]
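One detail of the QPSolver snippet above that is easy to check in isolation is the "buffered" symmetric projection applied after each PDHG update: the last slot of the z-block is the slack epsilon, projected onto [0, +inf), while the remaining slots are clamped to [-1 - eps, 1 + eps]. A standalone sketch of that projection, using the same (batch, 2m) primal-dual layout, is below; it is an illustration of the projection step only, not the solver itself.

import torch
import torch.nn.functional as F

# Standalone sketch of the buffered symmetric projection described above;
# X has the (batch, 2m) layout [u | z], with z[..., -1] playing the role of epsilon.
def buffered_symmetric_project(X: torch.Tensor, m: int) -> torch.Tensor:
    u, z = X[:, :m], X[:, m:]
    eps = F.relu(z[:, -1:])                             # slack epsilon -> [0, +inf)
    body = torch.clamp(z[:, :-1], -1 - eps, 1 + eps)    # other slots -> [-1-eps, 1+eps]
    return torch.cat([u, body, eps], dim=1)

X = torch.randn(4, 2 * 5)                               # batch of 4, m = 5
X_proj = buffered_symmetric_project(X, m=5)
assert X_proj.shape == X.shape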
import torch import numpy as np import scipy import functools import os from torch import nn from ..modules.qp_solver import QPSolver from ..modules.warm_starter import WarmStarter from ..utils.torch_utils import make_psd, interpolate_state_dicts from ..utils.mpc_utils import mpc2qp, scenario_robust_mpc, tube_robust_mpc from ..utils.osqp_utils import osqp_oracle from ..utils.np_batch_op import np_batch_op from concurrent.futures import ThreadPoolExecutor
11,273
else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (indepedent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controlller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = 
executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else: osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True) if q.shape[0] > 1:
class StrictAffineLayer(nn.Module): """ Layer mapping from obs to (q, b) in the strict affine form. """ def __init__(self, input_size, n, m, obs_has_half_ref): super().__init__() self.obs_has_half_ref = obs_has_half_ref self.input_size = input_size self.q_layer = nn.Linear(self.input_size, n, bias=False) if not self.obs_has_half_ref: self.b_layer = nn.Linear(self.input_size // 2, m, bias=True) else: self.b_layer = nn.Linear(self.input_size, m, bias=True) def forward(self, x): if not self.obs_has_half_ref: x0 = x[:, :self.input_size // 2] else: x0 = x q = self.q_layer(x) b = self.b_layer(x0) return torch.cat([q, b], dim=1) class QPUnrolledNetwork(nn.Module): """ Learn a QP problem from the input using a MLP, then solve the QP using fixed number of unrolled PDHG iterations. Form of QP: minimize (1/2)x'Px + q'x subject to Hx + b >= 0, where x in R^n, b in R^m. """ def __init__( self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder, shared_PH=False, affine_qb=False, strict_affine_layer=False, obs_has_half_ref=False, symmetric=False, no_b=False, use_warm_starter=False, train_warm_starter=False, ws_loss_coef=1., ws_update_rate=0.01, ws_loss_shaper=lambda x: x ** (1 / 2), mpc_baseline=None, use_osqp_for_mpc=False, imitate_mpc=False, use_residual_loss=False, force_feasible=False, feasible_lambda=10, is_test=False, ): """mlp_builder is a function mapping (input_size, output_size) to a nn.Sequential object. If shared_PH == True, P and H are parameters indepedent of input, and q and b are functions of input; Otherwise, (P, H, q, b) are all functions of input. If affine_qb == True, then q and b are restricted to be affine functions of input. If strict_affine_layer == True (only effective when affine_qb=True), then: 1. q is linear w.r.t. (x0, xref) (no bias) 2. b is affine w.r.t. x0 (no dependence on xref) If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation. If symmetric == True (only effective when affine_qb=True), then: 1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x. 2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0. If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1. If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC. If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning. If force_feasible == True, solve the following problem instead of the original QP problem: minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2 s.t. Hx + b + y * 1 >= 0, y >= 0, where x in R^n, y in R. In this case, the solution returned will be of dimension (n + 1). 
""" super().__init__() self.shared_PH = shared_PH self.affine_qb = affine_qb self.strict_affine_layer = strict_affine_layer self.obs_has_half_ref = obs_has_half_ref self.device = device self.input_size = input_size # QP dimensions: there are the number of variables and constraints WITHOUT considering the slack variable self.n_qp = n_qp self.m_qp = m_qp self.qp_iter = qp_iter self.symmetric = symmetric self.no_b = no_b self.n_P_param = n_qp * (n_qp + 1) // 2 self.n_q_param = n_qp self.n_H_param = m_qp * n_qp self.n_b_param = m_qp if not self.no_b else 0 self.n_mlp_output = 0 if not self.shared_PH: self.n_mlp_output += (self.n_P_param + self.n_H_param) self.P_params = None self.H_params = None else: self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device)) self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device)) if not self.affine_qb: self.n_mlp_output += (self.n_q_param + self.n_b_param) self.qb_affine_layer = None else: if not self.strict_affine_layer: self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric) else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (indepedent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, 
warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controlller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else: osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True) if q.shape[0] > 1:
sol_np, iter_counts = np_batch_op(osqp_oracle_with_iter_count, f(q), f(b), f_sparse(P), f_sparse(H))
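The next_line above hands batches of QP instances of the form minimize (1/2)x'Px + q'x subject to Hx + b >= 0 to an OSQP-based oracle. As a quick, independent sanity check of a single instance in that form, one could also solve a toy problem with cvxpy as sketched below; the matrices are random placeholders, not data from the repository.

import numpy as np
import cvxpy as cp

# Toy instance of  minimize (1/2) x'Px + q'x  s.t.  Hx + b >= 0  -- random placeholder
# data, used only to show the problem form these snippets pass to their QP oracles.
n, m = 4, 6
rng = np.random.default_rng(0)
L = rng.standard_normal((n, n))
P = L @ L.T + 0.1 * np.eye(n)      # symmetric positive definite cost
q = rng.standard_normal(n)
H = rng.standard_normal((m, n))
b = np.ones(m)                     # x = 0 is strictly feasible

x = cp.Variable(n)
problem = cp.Problem(cp.Minimize(0.5 * cp.quad_form(x, P) + q @ x), [H @ x + b >= 0])
problem.solve()
print("status:", problem.status, "x*:", x.value)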
8
2023-11-28 05:56:22+00:00
16k
Fraunhofer-SCAI/llamol
sample.py
[ { "identifier": "Transformer", "path": "model.py", "snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs, context_params: ContextArgs):\n super().__init__()\n self.params = params\n self.context_params = context_params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)\n\n self.frag_embeddings = nn.Embedding(params.vocab_size, params.dim)\n self.frag_type_embedding = nn.Embedding(1, params.dim)\n\n self.context_lookup = {k: i for i, k in enumerate(context_params.context_keys)}\n self.conditions_type_embeddings = nn.Embedding(\n len(context_params.context_keys), params.dim\n )\n self.conditions_embeddings_lookup = nn.ModuleDict(\n {\n k: nn.Sequential(\n nn.Linear(dim, params.dim, bias=True),\n )\n for k, dim in zip(\n context_params.context_keys, context_params.context_dims\n )\n }\n )\n\n self.dropout = nn.Dropout(params.dropout)\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = nn.Linear(params.dim, params.vocab_size, bias=False)\n\n # share the unembedding parameters with the embedding parameters\n self.tok_embeddings.weight = (\n self.output.weight\n ) # https://paperswithcode.com/method/weight-tying\n\n # some useful precompute for the RoPE relative positional embeddings\n freqs_cos, freqs_sin = precompute_freqs_cis(\n self.params.dim // self.params.n_heads, self.params.max_seq_len\n )\n self.register_buffer(\"freqs_cos\", freqs_cos, persistent=False)\n self.register_buffer(\"freqs_sin\", freqs_sin, persistent=False)\n\n # init all weights\n self.apply(self._init_weights)\n # apply special scaled init to the residual projections, per GPT-2 paper\n for pn, p in self.named_parameters():\n if pn.endswith(\"w3.weight\") or pn.endswith(\"wo.weight\"):\n torch.nn.init.normal_(\n p, mean=0.0, std=0.02 / math.sqrt(2 * params.n_layers)\n )\n\n # Initialize attribute for the loss of the last forward call. 
This will be set if the forward is called with a targets tensor.\n self.last_loss = None\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def forward(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n\n freqs_cos = self.freqs_cos[:seqlen]\n freqs_sin = self.freqs_sin[:seqlen]\n\n for layer in self.layers:\n h = layer(h, freqs_cos, freqs_sin)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n # https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def forward_with_kvcache(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n cache_id: int = 1,\n pos_seq_len: Optional[int] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n if pos_seq_len is None:\n pos_seq_len = seqlen\n else:\n pos_seq_len = max(seqlen, pos_seq_len + context_seq_len)\n\n freqs_cos = self.freqs_cos[:pos_seq_len]\n freqs_sin = self.freqs_sin[:pos_seq_len]\n\n for layer in self.layers:\n h = layer.forward_with_kvcache(h, freqs_cos, freqs_sin, cache_id=cache_id)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n 
# https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def _add_context_to_seq(self, tokens, context, fragment, bsz, device):\n h = self.tok_embeddings(tokens)\n h = self.dropout(h)\n\n if fragment is not None:\n fragment_type_enc = torch.zeros_like(\n fragment, dtype=torch.long, device=device\n )\n\n h = torch.concat(\n (\n self.tok_embeddings(fragment)\n + self.frag_embeddings(fragment)\n + self.frag_type_embedding(fragment_type_enc),\n h,\n ),\n dim=1,\n )\n\n if context is not None and len(context) != 0:\n # context is a dictionary with key : context_tensor of shape (batch_size, context_dim)\n type_ids = []\n context_vals = []\n\n for emb_key, context_val in context.items():\n emb_context_val = self.conditions_embeddings_lookup[emb_key](\n context_val.unsqueeze(1).to(device)\n ).unsqueeze(1)\n\n context_vals.append(emb_context_val)\n type_ids_tensor = torch.tensor(\n [self.context_lookup[emb_key]], device=device, dtype=torch.long\n )\n type_ids.append(type_ids_tensor)\n\n context_types = (\n torch.concat(type_ids, dim=0).reshape(-1, 1).expand(-1, bsz).T\n )\n # shape(len(context),batch_size, emb_size)\n context_types = self.conditions_type_embeddings(context_types)\n\n context_vals = torch.concat(context_vals, dim=1).to(device)\n\n # SHAPE\n h = torch.concat([context_vals + context_types, h], dim=1)\n return h\n\n def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):\n # start with all of the candidate parameters\n param_dict = {pn: p for pn, p in self.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. 
all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n print(\n f\"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters\"\n )\n print(\n f\"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters\"\n )\n # Create AdamW optimizer and use the fused version if it is available\n fused_available = \"fused\" in inspect.signature(torch.optim.AdamW).parameters\n use_fused = fused_available and device_type == \"cuda\"\n extra_args = dict(fused=True) if use_fused else dict()\n optimizer = torch.optim.AdamW(\n optim_groups, lr=learning_rate, betas=betas, **extra_args\n )\n print(f\"using fused AdamW: {use_fused}\")\n\n return optimizer\n\n def estimate_mfu(self, fwdbwd_per_iter, dt):\n \"\"\"estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS\"\"\"\n # first estimate the number of flops we do per iteration.\n # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311\n N = sum(p.numel() for p in self.parameters())\n cfg = self.params\n L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim // cfg.n_heads, cfg.max_seq_len\n flops_per_token = 6 * N + 12 * L * H * Q * T\n flops_per_fwdbwd = flops_per_token * T\n flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter\n # express our flops throughput as ratio of A100 bfloat16 peak flops\n flops_achieved = flops_per_iter * (1.0 / dt) # per second\n flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS\n mfu = flops_achieved / flops_promised\n return mfu\n\n @torch.inference_mode()\n def generate(\n self,\n tokenizer: SmilesTokenizer,\n context: Union[torch.Tensor, None] = None,\n fragments: Union[torch.Tensor, None] = None,\n max_length: int = 50,\n num_gen: int = 200,\n start_smiles: Union[str, None] = None,\n temperature: float = 1.0,\n top_k: Union[int, None] = None,\n device: torch.device = torch.device(\"cpu\"),\n cache_kv: bool = False,\n ) -> List[str]:\n batch_size = num_gen\n if start_smiles is not None:\n tokenized_start_selfie = tokenizer.encode(start_smiles)[\n :-1\n ] # remove <eos> token\n tokenized_start_selfie = torch.tensor(\n tokenized_start_selfie, device=device, dtype=torch.long\n ).view(-1, 1)\n tokenized_start_selfie = tokenized_start_selfie.repeat(1, batch_size)\n\n outputs = tokenized_start_selfie.T\n else:\n outputs = (\n torch.LongTensor([[tokenizer.cls_token_id] * batch_size]).to(device)\n ).T # batch_size\n self.eval()\n\n start_len = outputs.shape[1]\n has_end_idx = np.array([0] * batch_size)\n cache_id = np.random.randint(0, int(1e10), 1).item()\n with torch.no_grad():\n with tqdm(total=max_length, desc=\"Generation\") as pbar:\n for i in range(start_len, max_length):\n # trg_tensor = #torch.LongTensor(outputs).to(model.device)\n if not cache_kv:\n logits = self(outputs, context=context, fragment=fragments)\n else:\n # logits_ = self(outputs, context=context, fragment=fragments)\n if i == start_len:\n # When starting pass the whole input, so that \"start_smiles\" works, then only the newly generated token, because of the cache\n func_input = outputs\n else:\n func_input = outputs[:, 
-1].unsqueeze(-1)\n logits = self.forward_with_kvcache(\n func_input,\n context=context,\n fragment=fragments,\n cache_id=cache_id,\n pos_seq_len=outputs.size(-1),\n )\n\n # raise NotImplementedError(\"Currently not working / right implemented\")\n # logits = self.forward_with_kvcache(outputs, context=context, fragment=fragments,cache_id = cache_id)\n\n logits = logits[:, -1, :] # crop to just the final time step\n if temperature == 0.0:\n # \"sample\" the single most likely index\n _, logits = torch.topk(logits, k=1, dim=-1)\n else:\n # pluck the logits at the final step and scale by desired temperature\n logits = logits / temperature\n # optionally crop the logits to only the top k options\n if top_k is not None:\n v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n logits[logits < v[:, [-1]]] = -float(\"Inf\")\n\n probs = F.softmax(logits, dim=-1)\n idx_next = torch.multinomial(probs, num_samples=1)\n\n ended_sentences = idx_next == tokenizer.sep_token_id\n if torch.count_nonzero(ended_sentences) != 0:\n indicies = torch.nonzero(ended_sentences)\n indicies = indicies.cpu().numpy()\n for end_idx in indicies[:, 0]:\n if has_end_idx[end_idx] == 0:\n has_end_idx[end_idx] = i\n\n # print(has_end_idx)\n\n if all([idx != 0 for idx in has_end_idx]):\n break\n\n # outputs.append(best_guesses)\n # outputs = torch.row_stack((outputs, idx_next))\n outputs = torch.cat((outputs, idx_next), dim=1)\n pbar.update(1)\n\n out_selfies = []\n for output, end_idx in zip(outputs.cpu().numpy(), has_end_idx):\n # Incase of limiting the max_len\n if end_idx == 0:\n selfie = [tokenizer._convert_id_to_token(idx) for idx in output[:]]\n else:\n selfie = [\n tokenizer._convert_id_to_token(idx) for idx in output[:end_idx]\n ]\n selfie = \"\".join(selfie[1:])\n out_selfies.append(selfie)\n\n # for indicies in outputs:\n # translated_sentence = [tokenizer.idx_to_tokens[idx] for idx in outputs]\n # remove start token\n return out_selfies\n\n @staticmethod\n def load(path, device: torch.device = torch.device(\"cpu\")) -> Transformer:\n data = torch.load(path, map_location=device)\n\n newinstace = Transformer(data[\"model_params\"], data[\"context_params\"])\n newinstace.load_state_dict(data[\"state_dict\"])\n return newinstace.to(device)\n\n def save(self, filepath):\n torch.save(\n {\n \"state_dict\": self.state_dict(),\n **dict(model_params=self.params, context_params=self.context_params),\n },\n filepath,\n )\n\n def getNumberTrainableParams(self) -> int:\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n\n def getNumberParams(self) -> int:\n return sum(p.numel() for p in self.parameters())" }, { "identifier": "check_metrics", "path": "plot_utils.py", "snippet": "def check_metrics(generated_smiles: List[str], dataset_smiles: List[str]):\n len_before = len(generated_smiles)\n generated_smiles = [g for g in generated_smiles if g is not None]\n len_after = len(generated_smiles)\n\n novel = novelty(generated_smiles, dataset_smiles)\n unique_at_1k = unique_at(generated_smiles, k=1000)\n unique_at_10k = unique_at(generated_smiles, k=10000)\n return dict(\n novelty=novel,\n unique_at_1k=unique_at_1k,\n unique_at_10k=unique_at_10k,\n validity=len_after / float(len_before),\n )" }, { "identifier": "plot_1D_condition", "path": "plot_utils.py", "snippet": "def plot_1D_condition(\n context_col,\n save_path,\n new_context,\n generated_smiles,\n temperature,\n context_dict,\n context_scaler=None,\n):\n for con_col in context_col:\n save_path = os.path.join(\n save_path, 
f\"{con_col}_{'-'.join(context_col)}_temp{temperature}\"\n )\n os.makedirs(save_path, exist_ok=True)\n\n current_context = new_context[con_col].cpu().detach().numpy()\n if con_col == \"mol_weight\":\n predicted_context = calcContextMolWeight(generated_smiles)\n elif con_col == \"logp\":\n predicted_context = calcContextLogP(generated_smiles)\n elif con_col == \"sascore\":\n predicted_context = calcContextSAScore(generated_smiles)\n elif con_col == \"energy\":\n # TODO: Change to something better\n predicted_context = calcContextEnergy(generated_smiles)\n\n if context_scaler is not None:\n raise NotImplementedError(\"Not implemented yet\")\n # context_list = context_scaler.inverse_transform(context_list)\n\n mean_vals_pred = []\n labels = np.unique(current_context)\n mse_value = []\n mad_value = []\n for label in labels:\n mask = (current_context == label).reshape(-1)\n mean_val = np.mean(predicted_context[mask])\n mean_vals_pred.append(mean_val)\n mse_value.extend((predicted_context[mask] - label) ** 2)\n mad_value.extend(abs(predicted_context[mask] - label))\n\n mse = np.mean(mse_value)\n mad = np.mean(mad_value)\n logger.info(f\"MSE {mse}\")\n logger.info(f\"MAD {mad}\")\n logger.info(f\"SD: {np.std(mad_value)}\")\n\n current_context = current_context.reshape(-1)\n\n # Create a figure and axes\n fig, ax1 = plt.subplots()\n\n # Scatter plot\n ax1.scatter(\n current_context,\n predicted_context,\n label=\"Ground Truth vs Prediction\",\n c=\"blue\",\n alpha=0.5,\n )\n ax1.plot(\n np.arange(np.min(current_context), np.max(current_context) + 1),\n np.arange(np.min(current_context), np.max(current_context) + 1),\n label=\"y=x\",\n c=\"black\",\n )\n ax1.scatter(labels, mean_vals_pred, label=\"Mean predicted values\", c=\"red\")\n ax1.set_xlabel(\"Ground Truth\")\n ax1.set_ylabel(\"Prediction\")\n\n # Histogram\n ax2 = ax1.twinx() # Create a twin Axes sharing the x-axis\n sns.histplot(\n context_dict[con_col],\n # bins=200,\n label=\"Dataset distribution\",\n alpha=0.5,\n # kde=True,\n # element=\"poly\",\n ax=ax2,\n )\n # ax2.hist(\n # context_dict[con_col],\n # bins=200,\n # label=\"Dataset distribution\",\n # alpha=0.5,\n # )\n ax2.set_ylabel(\"Frequency\")\n\n # Combine legends\n handles1, labels1 = ax1.get_legend_handles_labels()\n handles2, labels2 = ax2.get_legend_handles_labels()\n\n ax1.legend(handles1 + handles2, labels1 + labels2)\n\n plt.xlim((np.min(current_context), np.max(current_context) + 1))\n # Set title\n display_name = COL_TO_DISPLAY_NAME[con_col]\n plt.title(f\"{display_name} - temperature: {temperature} - mse: {round(mse, 4)}\")\n\n out_df = pd.DataFrame(\n {\n \"smiles\": generated_smiles,\n f\"{con_col}\": predicted_context.tolist(),\n f\"target_{con_col}\": current_context.tolist(),\n }\n )\n out_df.to_csv(os.path.join(save_path, \"predictions.csv\"), index=False)\n out_path = os.path.join(save_path, \"graph.png\")\n print(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()" }, { "identifier": "plot_2D_condition", "path": "plot_utils.py", "snippet": "def plot_2D_condition(\n context_col,\n save_path,\n new_context,\n generated_smiles,\n temperature,\n label: Union[str, None] = None,\n):\n save_path = os.path.join(\n save_path, f\"multicond2_{'-'.join(context_col)}_temp={temperature}\"\n )\n if label is not None:\n save_path = os.path.join(save_path, label)\n\n os.makedirs(save_path, exist_ok=True)\n delta_dict = {c: [] for c in context_col}\n predicted_context_dict = {}\n for con_col in context_col:\n current_context = new_context[con_col].cpu().numpy()\n if 
con_col == \"mol_weight\":\n predicted_context = calcContextMolWeight(generated_smiles)\n elif con_col == \"logp\":\n predicted_context = calcContextLogP(generated_smiles)\n elif con_col == \"sascore\":\n predicted_context = calcContextSAScore(generated_smiles)\n elif con_col == \"energy\":\n # TODO: Change to something better\n predicted_context = calcContextEnergy(generated_smiles)\n\n predicted_context_dict[con_col] = np.array(predicted_context)\n delta_dict[con_col] = np.abs(current_context - np.array(predicted_context))\n\n # Create a DataFrame from delta_dict\n df = pd.DataFrame(delta_dict)\n real_values_prop1 = new_context[context_col[0]].cpu().numpy()\n real_values_prop2 = new_context[context_col[1]].cpu().numpy()\n # cmap = plt.get_cmap('Blues') # Choose a green color palette from Matplotlib\n mse_vals_x = []\n mad_vals_x = []\n mse_vals_y = []\n mad_vals_y = []\n fig = plt.figure()\n ax = plt.subplot(111)\n for v1 in np.unique(real_values_prop1):\n for v2 in np.unique(real_values_prop2):\n mask = (real_values_prop1 == v1) & (real_values_prop2 == v2)\n indices = np.nonzero(mask)[0]\n # print(\"Indices\", len(indices))\n # Get the color from the color palette based on the v1 value\n # color = cmap((v1 - np.min(real_values_prop1)) / (np.max(real_values_prop1) - np.min(real_values_prop1)))\n color = np.random.rand(\n 3,\n )\n # # Plot scatter plot with the specified color and label\n\n x_pred = predicted_context_dict[context_col[0]][indices].ravel()\n y_pred = predicted_context_dict[context_col[1]][indices].ravel()\n mse_vals_x.extend((x_pred - v1) ** 2)\n mad_vals_x.extend(np.abs(x_pred - v1))\n\n mse_vals_y.extend((y_pred - v2) ** 2)\n mad_vals_y.extend(np.abs(y_pred - v2))\n\n ax.scatter(x_pred, y_pred, color=color, alpha=0.5)\n\n # Plot KDE plot with the specified color\n # sns.kdeplot(\n # data=pd.DataFrame(\n # {\n # f\"x\": x_pred,\n # f\"y\": y_pred,\n # }\n # ),\n # x=f\"x\",\n # y=f\"y\",\n # color=color,\n # fill=False,\n # bw_adjust=2.25,\n # # label=f\"({v1}, {v2})\"\n # )\n\n ax.scatter(v1, v2, color=color, label=f\"({v1}, {v2})\", marker=\"^\", s=20.0)\n\n mse_x = np.mean(mse_vals_x)\n mad_x = np.mean(mad_vals_x)\n mse_y = np.mean(mse_vals_y)\n mad_y = np.mean(mad_vals_y)\n\n logger.info(f\"MSE {context_col[0]}: {mse_x}\")\n logger.info(f\"MAD {context_col[0]}: {mad_x}\")\n logger.info(f\"MSE {context_col[1]}: {mse_y}\")\n logger.info(f\"MAD {context_col[1]}: {mad_y}\")\n\n file_path = os.path.join(save_path, \"metrics.txt\")\n\n with open(file_path, \"w\") as f:\n f.write(f\"MSE {context_col[0]}: {mse_x} \\n\")\n f.write(f\"MAD {context_col[0]}: {mad_x} \\n\")\n f.write(f\"MSE {context_col[1]}: {mse_y} \\n\")\n f.write(f\"MAD {context_col[1]}: {mad_y} \\n\")\n\n ax.set_xlabel(COL_TO_DISPLAY_NAME[context_col[0]])\n ax.set_ylabel(COL_TO_DISPLAY_NAME[context_col[1]])\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n # Put a legend to the right of the current axis\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n ax.set_title(\"Multi Property Distribution of Generated Molecules\")\n out_path = os.path.join(save_path, \"graph.png\")\n logger.info(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()\n return save_path" }, { "identifier": "plot_3D_condition", "path": "plot_utils.py", "snippet": "def plot_3D_condition(\n context_col, save_path, new_context, generated_smiles, temperature\n):\n save_path = os.path.join(\n save_path, f\"multicond3_{'-'.join(context_col)}_temp={temperature}\"\n )\n 
os.makedirs(save_path, exist_ok=True)\n predicted_context_dict = {}\n for con_col in context_col:\n predicted_context = calc_context_from_smiles(generated_smiles, con_col)\n\n predicted_context_dict[con_col] = np.array(predicted_context)\n\n real_values_prop1 = new_context[context_col[0]].cpu().numpy()\n real_values_prop2 = new_context[context_col[1]].cpu().numpy()\n real_values_prop3 = new_context[context_col[2]].cpu().numpy()\n # cmap = plt.get_cmap('Blues') # Choose a green color palette from Matplotlib\n\n mse_vals_x = []\n mad_vals_x = []\n mse_vals_y = []\n mad_vals_y = []\n mse_vals_z = []\n mad_vals_z = []\n\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n for v1 in np.unique(real_values_prop1):\n for v2 in np.unique(real_values_prop2):\n for v3 in np.unique(real_values_prop3):\n mask = (\n (real_values_prop1 == v1)\n & (real_values_prop2 == v2)\n & (real_values_prop3 == v3)\n )\n indices = np.nonzero(mask)[0]\n # print(\"Indices\", len(indices))\n # Get the color from the color palette based on the v1 value\n # color = cmap((v1 - np.min(real_values_prop1)) / (np.max(real_values_prop1) - np.min(real_values_prop1)))\n color = np.random.rand(\n 3,\n )\n\n x_pred = predicted_context_dict[context_col[0]][indices].ravel()\n y_pred = predicted_context_dict[context_col[1]][indices].ravel()\n z_pred = predicted_context_dict[context_col[2]][indices].ravel()\n\n mse_vals_x.extend((x_pred - v1) ** 2)\n mad_vals_x.extend(np.abs(x_pred - v1))\n\n mse_vals_y.extend((y_pred - v2) ** 2)\n mad_vals_y.extend(np.abs(y_pred - v2))\n\n mse_vals_z.extend((z_pred - v3) ** 2)\n mad_vals_z.extend(np.abs(z_pred - v3))\n\n # # Plot scatter plot with the specified color and label\n ax.scatter(v1, v2, v3, color=color, label=f\"({v1}, {v2}, {v3})\", s=20.0)\n ax.scatter(\n x_pred,\n y_pred,\n z_pred,\n color=color,\n )\n\n mse_x = np.mean(mse_vals_x)\n mad_x = np.mean(mad_vals_x)\n mse_y = np.mean(mse_vals_y)\n mad_y = np.mean(mad_vals_y)\n mse_z = np.mean(mse_vals_z)\n mad_z = np.mean(mad_vals_z)\n\n logger.info(f\"MSE {context_col[0]}: {mse_x}\")\n logger.info(f\"MAD {context_col[0]}: {mad_x}\")\n logger.info(f\"MSE {context_col[1]}: {mse_y}\")\n logger.info(f\"MAD {context_col[1]}: {mad_y}\")\n logger.info(f\"MSE {context_col[2]}: {mse_z}\")\n logger.info(f\"MAD {context_col[2]}: {mad_z}\")\n\n file_path = os.path.join(save_path, \"metrics.txt\")\n\n with open(file_path, \"w\") as f:\n f.write(f\"MSE {context_col[0]}: {mse_x} \\n\")\n f.write(f\"MAD {context_col[0]}: {mad_x} \\n\")\n\n f.write(f\"MSE {context_col[1]}: {mse_y} \\n\")\n f.write(f\"MAD {context_col[1]}: {mad_y} \\n\")\n\n f.write(f\"MSE {context_col[2]}: {mse_z} \\n\")\n f.write(f\"MAD {context_col[2]}: {mad_z} \\n\")\n\n ax.set_xlabel(COL_TO_DISPLAY_NAME[context_col[0]])\n ax.set_ylabel(COL_TO_DISPLAY_NAME[context_col[1]])\n ax.set_zlabel(COL_TO_DISPLAY_NAME[context_col[2]])\n # plt.legend(\n # bbox_to_anchor=(1.0, 0.5),\n # loc=\"center right\",\n # bbox_transform=plt.gcf().transFigure,\n # )\n # plt.subplots_adjust(left=0.05, bottom=0.1, right=0.8)\n plt.legend(\n bbox_to_anchor=(1.035, 0.5),\n loc=\"center right\",\n bbox_transform=plt.gcf().transFigure,\n )\n plt.subplots_adjust(left=0.05, bottom=0.1, right=0.775)\n\n plt.title(\"Multi Property Distribution of Generated Molecules\")\n out_path = os.path.join(save_path, \"graph.png\")\n print(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()\n\n return save_path" }, { "identifier": "plot_unconditional", "path": "plot_utils.py", "snippet": "def 
plot_unconditional(\n out_path: str = os.getcwd(),\n smiles: List[str] = [],\n temperature: float = 0.8,\n cmp_context_dict: Union[Dict[str, np.array], None] = None,\n context_cols: List[str] = [\"logp\", \"sascore\", \"mol_weight\"],\n):\n out_path = os.path.join(out_path, \"unconditional\")\n os.makedirs(out_path, exist_ok=True)\n\n for c in context_cols:\n plt.clf()\n\n context_cal = calc_context_from_smiles(smiles, c)\n\n if cmp_context_dict is not None:\n sns.histplot(\n cmp_context_dict[c],\n stat=\"density\",\n label=\"Dataset Distribution\",\n alpha=0.75,\n color=\"blue\",\n )\n sns.histplot(\n context_cal,\n stat=\"density\",\n label=\"Generated Molecules Distribution\",\n alpha=0.5,\n color=\"orange\",\n )\n\n if c == \"logp\":\n plt.xlim((-6, 8))\n else:\n plt.xlim((0, 10))\n\n plt.xlabel(COL_TO_DISPLAY_NAME[c])\n plt.title(\n f\"Unconditional Distribution {COL_TO_DISPLAY_NAME[c]} \\nwith Temperature {temperature}\"\n )\n plt.legend()\n\n out_file = os.path.join(out_path, f\"unc_{c}_temp={temperature}.png\")\n plt.savefig(out_file)\n logger.info(f\"Saved Unconditional to {out_file}\")" }, { "identifier": "SmilesTokenizer", "path": "tokenizer.py", "snippet": "class SmilesTokenizer(BertTokenizer):\n \"\"\"\n Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer\n implementation found in Huggingface's transformers library. It runs a WordPiece tokenization\n algorithm over SMILES strings using the tokenisation SMILES regex developed by Schwaller et. al.\n\n Please see https://github.com/huggingface/transformers\n and https://github.com/rxn4chemistry/rxnfp for more details.\n\n Examples\n --------\n >>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer\n >>> current_dir = os.path.dirname(os.path.realpath(__file__))\n >>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')\n >>> tokenizer = SmilesTokenizer(vocab_path)\n >>> print(tokenizer.encode(\"CC(=O)OC1=CC=CC=C1C(=O)O\"))\n [12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]\n\n\n References\n ----------\n .. [1] Schwaller, Philippe; Probst, Daniel; Vaucher, Alain C.; Nair, Vishnu H; Kreutter, David;\n Laino, Teodoro; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural\n Networks. ChemRxiv. Preprint. 
https://doi.org/10.26434/chemrxiv.9897365.v3\n\n Notes\n ----\n This class requires huggingface's transformers and tokenizers libraries to be installed.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n\n def __init__(\n self,\n # unk_token=\"[UNK]\",\n # sep_token=\"[SEP]\",\n # pad_token=\"[PAD]\",\n # cls_token=\"[CLS]\",\n # mask_token=\"[MASK]\",\n **kwargs\n ):\n \"\"\"Constructs a SmilesTokenizer.\n\n Parameters\n ----------\n vocab_file: str\n Path to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n \"\"\"\n\n vocab_file = os.path.join(os.path.dirname(__file__), \"data\", \"vocab.txt\")\n\n super().__init__(vocab_file, **kwargs)\n\n self.sos = \"[SOS]\"\n self.eos = \"[EOS]\"\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\"Can't find a vocab file at path '{}'.\".format(vocab_file))\n self.vocab = load_vocab(vocab_file)\n self.highest_unused_index = max(\n [i for i, v in enumerate(self.vocab.keys()) if v.startswith(\"[unused\")]\n )\n self.ids_to_tokens = collections.OrderedDict(\n [(ids, tok) for tok, ids in self.vocab.items()]\n )\n self.basic_tokenizer = BasicSmilesTokenizer()\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n @property\n def vocab_list(self):\n return list(self.vocab.keys())\n\n def _tokenize(self, text: str):\n \"\"\"\n Tokenize a string into a list of tokens.\n\n Parameters\n ----------\n text: str\n Input string sequence to be tokenized.\n \"\"\"\n\n split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"\n Converts a token (str/unicode) in an id using the vocab.\n\n Parameters\n ----------\n token: str\n String token from a larger sequence to be converted to a numerical id.\n \"\"\"\n\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"\n Converts an index (integer) in a token (string/unicode) using the vocab.\n\n Parameters\n ----------\n index: int\n Integer index to be converted back to a string-based token as part of a larger sequence.\n \"\"\"\n\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]):\n \"\"\"Converts a sequence of tokens (string) in a single string.\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n Returns\n -------\n out_string: str\n Single string from combined tokens.\n \"\"\"\n\n out_string: str = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n\n token_ids: list[int]\n list of tokenized input ids. 
Can be obtained using the encode or encode_plus methods.\n \"\"\"\n\n return [self.cls_token_id] + token_ids + [self.sep_token_id]\n\n def add_special_tokens_single_sequence(self, tokens: List[str]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n \"\"\"\n return [self.cls_token] + tokens + [self.sep_token]\n\n def add_special_tokens_ids_sequence_pair(\n self, token_ids_0: List[int], token_ids_1: List[int]\n ) -> List[int]:\n \"\"\"\n Adds special tokens to a sequence pair for sequence classification tasks.\n A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]\n\n Parameters\n ----------\n token_ids_0: List[int]\n List of ids for the first string sequence in the sequence pair (A).\n\n token_ids_1: List[int]\n List of tokens for the second string sequence in the sequence pair (B).\n \"\"\"\n\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def add_padding_tokens(\n self, token_ids: List[int], length: int, right: bool = True\n ) -> List[int]:\n \"\"\"\n Adds padding tokens to return a sequence of length max_length.\n By default padding tokens are added to the right of the sequence.\n\n Parameters\n ----------\n token_ids: list[int]\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n length: int\n\n right: bool (True by default)\n\n Returns\n ----------\n token_ids :\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n padding: int\n Integer to be added as padding token\n\n \"\"\"\n padding = [self.pad_token_id] * (length - len(token_ids))\n\n if right:\n return token_ids + padding\n else:\n return padding + token_ids\n\n def save_vocabulary(\n self, vocab_path: str\n ): # -> tuple[str]: doctest issue raised with this return type annotation\n \"\"\"\n Save the tokenizer vocabulary to a file.\n\n Parameters\n ----------\n vocab_path: obj: str\n The directory in which to save the SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n Returns\n ----------\n vocab_file: :obj:`Tuple(str)`:\n Paths to the files saved.\n typle with string to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n \"\"\"\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES[\"vocab_file\"])\n else:\n vocab_file = vocab_path\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(\n vocab_file\n )\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" } ]
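The estimate_mfu method quoted in the first context snippet uses the PaLM Appendix B approximation flops_per_token = 6*N + 12*L*H*Q*T and compares achieved throughput against the 312 TFLOPS bfloat16 peak of an A100. Below is a minimal standalone sketch of that calculation; every concrete number (parameter count, model dimensions, batch/accumulation product, iteration time) is an assumed illustration value, not taken from the repository.

# Sketch of the MFU estimate performed by estimate_mfu above.
# All concrete numbers are assumptions chosen only for illustration.
N = 45_000_000                  # total parameter count (assumed)
L, H, Q, T = 8, 8, 32, 256      # n_layers, n_heads, head dim, max_seq_len (assumed)

flops_per_token = 6 * N + 12 * L * H * Q * T    # PaLM Appendix B approximation
flops_per_fwdbwd = flops_per_token * T          # one forward+backward pass over T tokens
fwdbwd_per_iter = 256                           # e.g. batch size x grad accumulation (assumed)
dt = 0.5                                        # measured wall-clock seconds per iteration (assumed)

flops_achieved = flops_per_fwdbwd * fwdbwd_per_iter / dt
mfu = flops_achieved / 312e12                   # A100 bfloat16 peak: 312 TFLOPS
print(f"estimated MFU: {mfu:.2%}")              # about 12% with these assumed numbers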
import os import sys import time import pandas as pd import torch import numpy as np import re import logging import argparse import rdkit.rdBase as rkrb import rdkit.RDLogger as rkl from contextlib import nullcontext from tqdm.auto import tqdm from model import Transformer from plot_utils import ( check_metrics, plot_1D_condition, plot_2D_condition, plot_3D_condition, plot_unconditional, ) from tokenizer import SmilesTokenizer from typing import Dict, List, Tuple, Union from rdkit import Chem from rdkit import DataStructs from rdkit.Chem.Fingerprints import FingerprintMols
11,145
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer
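In the Sampler code above, _init_model ends by storing self.ctx = self._autocast(), but the _autocast helper itself lies outside the quoted crop. Since nullcontext is imported at the top of the file, a plausible implementation (an assumption inferred from the imports and from self.device_type / self.ptdtype, not confirmed by the repository) would return a no-op context on CPU and torch.autocast on CUDA:

from contextlib import nullcontext
import torch

def _autocast(self):
    # Hypothetical sketch of Sampler._autocast: mixed-precision autocast on CUDA,
    # plain no-op context manager on CPU.
    if self.device_type == "cuda":
        return torch.autocast(device_type=self.device_type, dtype=self.ptdtype)
    return nullcontext()

Generation would then run inside "with self.ctx:" so that matmuls execute in the configured reduced-precision dtype while the rest of the code stays unchanged.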
self.tokenizer = SmilesTokenizer()
6
2023-11-28 09:50:31+00:00
16k
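The generate method in this record's first context snippet crops the logits to the final time step, scales them by the temperature, optionally keeps only the top_k logits, and then samples with torch.multinomial. A self-contained sketch of just that sampling step (factored into a free function here for illustration; the repository keeps the logic inline) could look like this:

from typing import Optional

import torch
import torch.nn.functional as F


def sample_next_token(
    logits: torch.Tensor,        # final-step logits, shape (batch, vocab)
    temperature: float = 1.0,
    top_k: Optional[int] = None,
) -> torch.Tensor:
    """Return one sampled token id per batch row, shape (batch, 1)."""
    if temperature == 0.0:
        # Greedy decoding: take the single most likely index per row.
        return torch.topk(logits, k=1, dim=-1).indices
    logits = logits / temperature
    if top_k is not None:
        # Mask everything below the k-th largest logit in each row.
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        logits = logits.masked_fill(logits < v[:, [-1]], float("-inf"))
    probs = F.softmax(logits, dim=-1)
    return torch.multinomial(probs, num_samples=1)

Stopping is handled separately in generate: rows whose sampled id equals tokenizer.sep_token_id are recorded in has_end_idx, and the loop exits once every row has ended.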
lampmerchant/tashrouter
tashrouter/router/router.py
[ { "identifier": "RoutingTable", "path": "tashrouter/router/routing_table.py", "snippet": "class RoutingTable:\n '''A Router's routing table.'''\n \n STATE_GOOD = 1\n STATE_SUS = 2\n STATE_BAD = 3\n STATE_WORST = 4\n \n def __init__(self, router):\n self._router = router\n self._entry_by_network = {}\n self._state_by_entry = {}\n self._lock = Lock()\n \n def __contains__(self, entry):\n with self._lock:\n return True if entry in self._state_by_entry else False\n \n def __iter__(self):\n with self._lock:\n retval = deque(self._state_by_entry.keys())\n yield from retval\n \n def get_by_network(self, network):\n '''Look up and return an entry in this RoutingTable by network number. Returns (entry, is_bad).'''\n with self._lock:\n entry = self._entry_by_network.get(network)\n if entry is None: return None, None\n return entry, True if self._state_by_entry[entry] in (self.STATE_BAD, self.STATE_WORST) else False\n \n def mark_bad(self, network_min, network_max):\n '''If this RoutingTable has an entry with the given network range, mark it bad. Return True if it existed, else False.'''\n with self._lock:\n cur_entries = set(self._entry_by_network.get(network) for network in range(network_min, network_max + 1))\n if len(cur_entries) != 1: return False\n cur_entry = cur_entries.pop() # this is either None or an entry with a coincident range to the new one\n if not cur_entry: return False\n if self._state_by_entry[cur_entry] != self.STATE_WORST: self._state_by_entry[cur_entry] = self.STATE_BAD\n return True\n \n def consider(self, entry):\n '''Consider a new entry for addition to the table. Return True if added, False if not.'''\n \n with self._lock:\n if entry in self._state_by_entry:\n self._state_by_entry[entry] = self.STATE_GOOD\n return True\n cur_entries = set(self._entry_by_network.get(network) for network in range(entry.network_min, entry.network_max + 1))\n if len(cur_entries) != 1: return False # this network range overlaps one that's already defined, can't do anything with it\n cur_entry = cur_entries.pop()\n \n # range currently undefined, add new entry to the table\n if cur_entry is None:\n pass\n # range fully defined by an entry that is either bad or further away, add new entry to the table\n elif cur_entry.distance >= entry.distance or self._state_by_entry[cur_entry] in (self.STATE_BAD, self.STATE_WORST):\n pass\n # range fully defined by an entry representing a route that is now further than we thought, add new entry to the table\n elif (cur_entry.next_network, cur_entry.next_node, cur_entry.port) == (entry.next_network, entry.next_node, entry.port):\n pass\n # range fully defined by a good entry that is closer than the new one, ignore new entry\n else:\n return False\n \n if cur_entry: self._state_by_entry.pop(cur_entry)\n self._state_by_entry[entry] = self.STATE_GOOD\n for network in range(entry.network_min, entry.network_max + 1): self._entry_by_network[network] = entry\n logging.debug('%s adding: %s', str(self._router), str(entry))\n return True\n \n def age(self):\n '''Age the RoutingTableEntries in this RoutingTable.'''\n entries_to_delete = set()\n networks_to_delete = deque()\n with self._lock:\n for entry in set(self._entry_by_network.values()):\n if self._state_by_entry[entry] == self.STATE_WORST:\n logging.debug('%s aging out: %s', str(self._router), str(entry))\n entries_to_delete.add(entry)\n self._state_by_entry.pop(entry)\n try:\n self._router.zone_information_table.remove_networks(entry.network_min, entry.network_max)\n except ValueError as e:\n logging.warning(\"%s 
couldn't remove networks from zone information table: %s\", str(self._router), e.args[0])\n elif self._state_by_entry[entry] == self.STATE_BAD:\n self._state_by_entry[entry] = self.STATE_WORST\n elif self._state_by_entry[entry] == self.STATE_SUS:\n self._state_by_entry[entry] = self.STATE_BAD\n elif self._state_by_entry[entry] == self.STATE_GOOD and entry.distance != 0:\n self._state_by_entry[entry] = self.STATE_SUS\n for network, entry in self._entry_by_network.items():\n if entry in entries_to_delete: networks_to_delete.append(network)\n for network in networks_to_delete: self._entry_by_network.pop(network)\n \n def entries(self):\n '''Yield entries from this RoutingTable along with their badness state.'''\n with self._lock: retval = deque(self._state_by_entry.items())\n for entry, state in retval: yield entry, True if state in (self.STATE_BAD, self.STATE_WORST) else False\n \n def set_port_range(self, port, network_min, network_max):\n '''Set the network range for a given port, unsetting any previous entries in the table that defined it.'''\n entries_to_delete = set()\n networks_to_delete = deque()\n with self._lock:\n for network, entry in self._entry_by_network.items():\n if entry.port is port and entry.distance == 0:\n entries_to_delete.add(entry)\n networks_to_delete.append(network)\n for entry in entries_to_delete:\n logging.debug('%s deleting: %s', str(self._router), str(entry))\n self._state_by_entry.pop(entry)\n try:\n self._router.zone_information_table.remove_networks(entry.network_min, entry.network_max)\n except ValueError as e:\n logging.warning(\"%s couldn't remove networks from zone information table: %s\", str(self._router), e.args[0])\n for network in networks_to_delete: self._entry_by_network.pop(network)\n entry = RoutingTableEntry(extended_network=port.extended_network,\n network_min=network_min,\n network_max=network_max,\n distance=0,\n port=port,\n next_network=0,\n next_node=0)\n logging.debug('%s adding: %s', str(self._router), str(entry))\n for network in range(network_min, network_max + 1): self._entry_by_network[network] = entry\n self._state_by_entry[entry] = self.STATE_GOOD" }, { "identifier": "ZoneInformationTable", "path": "tashrouter/router/zone_information_table.py", "snippet": "class ZoneInformationTable:\n '''Zone Information Table (ZIT).'''\n \n def __init__(self, router):\n self._router = router\n self._network_min_to_network_max = {}\n self._network_min_to_zone_name_set = {}\n self._network_min_to_default_zone_name = {}\n self._zone_name_to_network_min_set = {}\n self._ucased_zone_name_to_zone_name = {}\n self._lock = Lock()\n \n def _check_range(self, network_min, network_max=None):\n looked_up_network_max = self._network_min_to_network_max.get(network_min)\n if network_max is None:\n if looked_up_network_max is None:\n raise ValueError('network range %d-? 
does not exist' % network_min)\n else:\n return looked_up_network_max\n elif looked_up_network_max == network_max: # if network range exists as given\n return network_max\n elif looked_up_network_max is not None:\n raise ValueError('network range %d-%d overlaps %d-%d' % (network_min, network_max, network_min, looked_up_network_max))\n else: # check for overlap\n for existing_min, existing_max in self._network_min_to_network_max.items():\n if existing_min > network_max or existing_max < network_min: continue\n raise ValueError('network range %d-%d overlaps %d-%d' % (network_min, network_max, existing_min, existing_max))\n return None\n \n def add_networks_to_zone(self, zone_name, network_min, network_max=None):\n '''Add a range of networks to a zone, adding the zone if it isn't in the table.'''\n \n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n ucased_zone_name = ucase(zone_name)\n \n with self._lock:\n \n if ucased_zone_name in self._ucased_zone_name_to_zone_name:\n zone_name = self._ucased_zone_name_to_zone_name[ucased_zone_name]\n else:\n self._ucased_zone_name_to_zone_name[ucased_zone_name] = zone_name\n self._zone_name_to_network_min_set[zone_name] = set()\n \n check_range = self._check_range(network_min, network_max)\n if check_range:\n network_max = check_range\n self._network_min_to_zone_name_set[network_min].add(zone_name)\n now_default = False\n else:\n self._network_min_to_network_max[network_min] = network_max\n self._network_min_to_zone_name_set[network_min] = set((zone_name,))\n self._network_min_to_default_zone_name[network_min] = zone_name\n now_default = True\n \n logging.debug('%s adding network range %d-%d to zone %s%s', str(self._router), network_min, network_max,\n zone_name.decode('mac_roman', 'replace'), ' (now default zone for this range)' if now_default else '')\n self._zone_name_to_network_min_set[zone_name].add(network_min)\n \n def remove_networks(self, network_min, network_max=None):\n '''Remove a range of networks from all zones, removing associated zones if now empty of networks.'''\n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n with self._lock:\n network_max = self._check_range(network_min, network_max)\n if not network_max: return\n logging.debug('%s removing network range %d-%d from all zones', str(self._router), network_min, network_max)\n for zone_name in self._network_min_to_zone_name_set[network_min]:\n s = self._zone_name_to_network_min_set[zone_name]\n s.remove(network_min)\n if not s:\n logging.debug('%s removing zone %s because it no longer contains any networks', str(self._router),\n zone_name.decode('mac_roman', 'replace'))\n self._zone_name_to_network_min_set.pop(zone_name)\n self._ucased_zone_name_to_zone_name.pop(ucase(zone_name))\n self._network_min_to_default_zone_name.pop(network_min)\n self._network_min_to_zone_name_set.pop(network_min)\n self._network_min_to_network_max.pop(network_min)\n \n def zones(self):\n '''Return the zones in this ZIT.'''\n with self._lock:\n return list(self._zone_name_to_network_min_set.keys())\n \n def zones_in_network_range(self, network_min, network_max=None):\n '''Return a deque containing the names of all zones in the given range of networks, default zone name first.'''\n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n with self._lock:\n if not self._check_range(network_min, 
network_max): return deque()\n default_zone_name = self._network_min_to_default_zone_name[network_min]\n retval = deque(zone_name for zone_name in self._network_min_to_zone_name_set[network_min] if zone_name != default_zone_name)\n retval.appendleft(default_zone_name)\n return retval\n \n def networks_in_zone(self, zone_name):\n '''Return a deque containing the network numbers of all networks in the given zone.'''\n with self._lock:\n zone_name = self._ucased_zone_name_to_zone_name.get(ucase(zone_name))\n if zone_name is None: return deque()\n retval = deque()\n for network_min in self._zone_name_to_network_min_set[zone_name]:\n retval.extend(range(network_min, self._network_min_to_network_max[network_min] + 1))\n return retval" }, { "identifier": "Datagram", "path": "tashrouter/datagram.py", "snippet": "class Datagram:\n '''DDP datagram.'''\n \n MAX_DATA_LENGTH = 586\n \n hop_count: int\n destination_network: int\n source_network: int\n destination_node: int\n source_node: int\n destination_socket: int\n source_socket: int\n ddp_type: int\n data: bytes\n \n @classmethod\n def from_long_header_bytes(cls, data):\n '''Construct a Datagram object from bytes in the long-header format and raise ValueErrors if there are issues.'''\n if len(data) < 13: raise ValueError('data too short, must be at least 13 bytes for long-header DDP datagram')\n (first, second, checksum, destination_network, source_network, destination_node, source_node, destination_socket, source_socket,\n ddp_type) = struct.unpack('>BBHHHBBBBB', data[:13])\n if first & 0xC0: raise ValueError('invalid long DDP header, top two bits of first byte must be zeroes')\n hop_count = (first & 0x3C) >> 2\n length = (first & 0x3) << 8 | second\n if length > 13 + cls.MAX_DATA_LENGTH:\n raise ValueError('invalid long DDP header, length %d is greater than %d' % (length, cls.MAX_DATA_LENGTH))\n if length != len(data):\n raise ValueError('invalid long DDP header, length field says %d but actual length is %d' % (length, len(data)))\n if checksum != 0:\n calc_checksum = ddp_checksum(data[4:])\n if calc_checksum != checksum:\n raise ValueError('invalid long DDP header, checksum is 0x%04X but should be 0x%04X' % (checksum, calc_checksum))\n return cls(hop_count=hop_count,\n destination_network=destination_network,\n source_network=source_network,\n destination_node=destination_node,\n source_node=source_node,\n destination_socket=destination_socket,\n source_socket=source_socket,\n ddp_type=ddp_type,\n data=data[13:])\n \n @classmethod\n def from_short_header_bytes(cls, destination_node, source_node, data):\n '''Construct a Datagram object from bytes in the short-header format and raise ValueErrors if there are issues.'''\n if len(data) < 5: raise ValueError('data too short, must be at least 5 bytes for short-header DDP datagram')\n first, second, destination_socket, source_socket, ddp_type = struct.unpack('>BBBBB', data[0:5])\n if first & 0xFC: raise ValueError('invalid short DDP header, top six bits of first byte must be zeroes')\n length = (first & 0x3) << 8 | second\n if length > 5 + cls.MAX_DATA_LENGTH:\n raise ValueError('invalid short DDP header, length %d is greater than %d' % (length, cls.MAX_DATA_LENGTH))\n if length != len(data):\n raise ValueError('invalid short DDP header, length field says %d but actual length is %d' % (length, len(data)))\n return cls(hop_count=0,\n destination_network=0,\n source_network=0,\n destination_node=destination_node,\n source_node=source_node,\n destination_socket=destination_socket,\n 
source_socket=source_socket,\n ddp_type=ddp_type,\n data=data[5:])\n \n def _check_ranges(self):\n '''Check that the Datagram's parameters are in range, raise ValueError if not.'''\n for name, min_value, max_value in (('hop count', 0, 15),\n ('destination network', 0, 65534),\n ('source network', 0, 65534),\n ('destination node', 0, 255),\n ('source node', 1, 254),\n ('destination socket', 0, 255),\n ('source socket', 0, 255),\n ('DDP type', 0, 255)):\n value = getattr(self, name.lower().replace(' ', '_'))\n if not min_value <= value <= max_value:\n raise ValueError('invalid %s %d, must be in range %d-%d' % (name, value, min_value, max_value))\n \n def as_long_header_bytes(self):\n '''Return this Datagram in long-header format as bytes and raise ValueErrors if there are issues.'''\n self._check_ranges()\n if len(self.data) > self.MAX_DATA_LENGTH:\n raise ValueError('data length %d is greater than max length %d' % (len(self.data), self.MAX_DATA_LENGTH))\n header = struct.pack('>HHBBBBB',\n self.destination_network,\n self.source_network,\n self.destination_node,\n self.source_node,\n self.destination_socket,\n self.source_socket,\n self.ddp_type)\n data = header + self.data\n length = 4 + len(data)\n checksum = 0\n for byte in data:\n checksum += byte\n checksum = (checksum & 0x7FFF) << 1 | (1 if checksum & 0x8000 else 0)\n checksum = checksum or 0xFFFF # because a zero value in the checksum field means one was not calculated\n header = struct.pack('>BBH',\n (self.hop_count & 0xF) << 2 | (length & 0x300) >> 8,\n length & 0xFF,\n checksum)\n return header + data\n \n def as_short_header_bytes(self):\n '''Return this Datagram in short-header format as bytes and raise ValueErrors if there are issues.'''\n if self.hop_count > 0:\n raise ValueError('invalid hop count %d, short-header datagrams may not have non-zero hop count' % self.hop_count)\n self._check_ranges()\n if len(self.data) > self.MAX_DATA_LENGTH:\n raise ValueError('data length %d is greater than max length %d' % (len(self.data), self.MAX_DATA_LENGTH))\n length = 5 + len(self.data)\n header = struct.pack('>BBBBB',\n (length & 0x300) >> 8,\n length & 0xFF,\n self.destination_socket,\n self.source_socket,\n self.ddp_type)\n return header + self.data\n \n def copy(self, **kwargs):\n '''Return a copy of this Datagram, replacing params specified by kwargs, if any.'''\n return dataclasses.replace(self, **kwargs)\n \n def hop(self):\n '''Return a copy of this Datagram with the hop count incremented by one.'''\n return self.copy(hop_count=self.hop_count + 1)" }, { "identifier": "EchoService", "path": "tashrouter/service/echo.py", "snippet": "class EchoService(Service):\n '''A Service which implements AppleTalk Echo Protocol (AEP).'''\n \n ECHO_SAS = 4\n ECHO_DDP_TYPE = 4\n \n ECHO_FUNC_REQUEST_BYTE = b'\\x01'\n ECHO_FUNC_REPLY_BYTE = b'\\x02'\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n if datagram.ddp_type != self.ECHO_DDP_TYPE: continue\n if not datagram.data: continue\n if datagram.data[0:1] != self.ECHO_FUNC_REQUEST_BYTE: continue\n 
router.reply(datagram, rx_port, self.ECHO_DDP_TYPE, self.ECHO_FUNC_REPLY_BYTE + datagram.data[1:])\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "NameInformationService", "path": "tashrouter/service/name_information.py", "snippet": "class NameInformationService(Service):\n '''A Service that implements Name Binding Protocol (NBP).'''\n \n NBP_SAS = 2\n NBP_DDP_TYPE = 2\n \n NBP_CTRL_BRRQ = 1\n NBP_CTRL_LKUP = 2\n NBP_CTRL_LKUP_REPLY = 3\n NBP_CTRL_FWDREQ = 4\n \n MAX_FIELD_LEN = 32\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _run(self, router):\n \n self.started_event.set()\n \n while True:\n \n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n \n if datagram.ddp_type != self.NBP_DDP_TYPE: continue\n if len(datagram.data) < 12: continue\n func_tuple_count, nbp_id, req_network, req_node, req_socket, _, object_field = struct.unpack('>BBHBBBB', datagram.data[:8])\n func = func_tuple_count >> 4\n tuple_count = func_tuple_count & 0xF\n if tuple_count != 1 or func not in (self.NBP_CTRL_BRRQ, self.NBP_CTRL_FWDREQ): continue\n if object_field < 1 or object_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 8 + object_field: continue\n type_field = datagram.data[8 + object_field]\n if type_field < 1 or type_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 9 + object_field + type_field: continue\n zone_field = datagram.data[9 + object_field + type_field]\n if zone_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 10 + object_field + type_field + zone_field: continue\n zone_field = datagram.data[10 + object_field + type_field:10 + object_field + type_field + zone_field] or b'*'\n type_field = datagram.data[9 + object_field:9 + object_field + type_field]\n object_field = datagram.data[8:8 + object_field]\n \n common_data = b''.join((struct.pack('>BHBBBB', nbp_id, req_network, req_node, req_socket, 0, len(object_field)),\n object_field,\n struct.pack('>B', len(type_field)),\n type_field,\n struct.pack('>B', len(zone_field)),\n zone_field))\n lkup_data = struct.pack('>B', (self.NBP_CTRL_LKUP << 4) | 1) + common_data\n fwdreq_data = struct.pack('>B', (self.NBP_CTRL_FWDREQ << 4) | 1) + common_data\n \n if func == self.NBP_CTRL_BRRQ:\n \n # if zone is *, try to sub in the zone name associated with the nonextended network whence the BrRq comes\n if zone_field == b'*':\n if rx_port.extended_network: continue # BrRqs from extended networks must provide zone name\n if rx_port.network:\n entry, _ = router.routing_table.get_by_network(rx_port.network)\n if entry:\n try:\n zones = router.zone_information_table.zones_in_network_range(entry.network_min)\n except ValueError:\n pass\n else:\n if len(zones) == 1: zone_field = zones[0] # there should not be more than one zone\n \n # if zone is still *, just broadcast a LkUp on the requesting network and call it done\n if zone_field == b'*':\n rx_port.send(0x0000, 0xFF, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=rx_port.network,\n destination_node=0xFF,\n source_node=rx_port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n 
ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n # we know the zone, so multicast LkUps to directly-connected networks and send FwdReqs to non-directly-connected ones\n else:\n entries = set(router.routing_table.get_by_network(network)\n for network in router.zone_information_table.networks_in_zone(zone_field))\n entries.discard((None, None))\n for entry, _ in entries:\n if entry.distance == 0:\n entry.port.multicast(zone_field, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=entry.port.network,\n destination_node=0xFF,\n source_node=entry.port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n else:\n router.route(Datagram(hop_count=0,\n destination_network=entry.network_min,\n source_network=0,\n destination_node=0x00,\n source_node=0,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=fwdreq_data))\n \n elif func == self.NBP_CTRL_FWDREQ:\n \n entry, _ = router.routing_table.get_by_network(datagram.destination_network)\n if entry is None or entry.distance != 0: continue # FwdReq thinks we're directly connected to this network but we're not\n entry.port.multicast(zone_field, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=entry.port.network,\n destination_node=0xFF,\n source_node=entry.port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n \n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "RoutingTableAgingService", "path": "tashrouter/service/routing_table_aging.py", "snippet": "class RoutingTableAgingService(Service):\n '''A Service which ages the Router's RoutingTable on a regular basis.'''\n \n DEFAULT_TIMEOUT = 20 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.stop_requested_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.stop_requested_event.set()\n self.stopped_event.wait()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n if self.stop_requested_event.wait(timeout=self.timeout): break\n router.routing_table.age()\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n pass" }, { "identifier": "RtmpRespondingService", "path": "tashrouter/service/rtmp/responding.py", "snippet": "class RtmpRespondingService(Service, RtmpService):\n '''A Service which responds to inbound RTMP Datagrams and maintains the Router's RoutingTable.'''\n \n def __init__(self):\n self.thread = None\n self.started_event = Event()\n self.queue = Queue()\n self.stop_flag = object()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.queue.join()\n \n def _run(self, router):\n \n while True:\n \n if self.started_event.is_set():\n self.queue.task_done()\n else:\n self.started_event.set()\n \n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n \n if datagram.ddp_type == self.RTMP_DDP_TYPE_DATA:\n \n # process header\n if len(datagram.data) < 4: continue # invalid, datagram too short\n sender_network, id_length, sender_node = 
struct.unpack('>HBB', datagram.data[0:4])\n if id_length != 8: continue # invalid, AppleTalk node numbers are only 8 bits in length\n data = datagram.data[4:]\n if rx_port.extended_network:\n if len(data) < 6: continue # invalid, datagram too short to contain at least one extended network tuple\n sender_network_min, range_distance, sender_network_max, rtmp_version = struct.unpack('>HBHB', data[0:6])\n if range_distance != 0x80: continue # invalid, first tuple must be the sender's extended network tuple\n else:\n if len(data) < 3: continue\n sender_network_min = sender_network_max = sender_network\n zero, rtmp_version = struct.unpack('>HB', data[0:3])\n if zero != 0: continue # invalid, this word must be zero on a nonextended network\n data = data[3:]\n if rtmp_version != self.RTMP_VERSION: continue # invalid, don't recognize this RTMP format\n \n # interpret tuples\n tuples = deque()\n data_idx = 0\n while True:\n packed = data[data_idx:data_idx + 3]\n if len(packed) != 3: break\n network_min, range_distance = struct.unpack('>HB', packed)\n if range_distance & 0x80:\n extended_network = True\n packed = data[data_idx + 3:data_idx + 6]\n if len(packed) != 3: break\n network_max, _ = struct.unpack('>HB', packed)\n data_idx += 6\n else:\n extended_network = False\n network_max = network_min\n data_idx += 3\n tuples.append((extended_network, network_min, network_max, range_distance & 0x1F))\n if data_idx != len(data): continue # invalid, tuples did not end where expected\n \n # if this Port doesn't know its network range yet, accept that this is from the network's seed router\n if rx_port.network_min == rx_port.network_max == 0: rx_port.set_network_range(sender_network_min, sender_network_max)\n \n # resolve the given tuples with the current RoutingTable\n for extended_network, network_min, network_max, distance in tuples:\n # if the entry is too many hops away or is a notify-neighbor entry, mark any entry we have as bad\n if distance >= 15:\n router.routing_table.mark_bad(network_min, network_max)\n # otherwise have the table consider a new entry based on this tuple\n else:\n router.routing_table.consider(RoutingTableEntry(extended_network=extended_network,\n network_min=network_min,\n network_max=network_max,\n distance=distance + 1,\n port=rx_port,\n next_network=sender_network,\n next_node=sender_node))\n \n elif datagram.ddp_type != self.RTMP_DDP_TYPE_REQUEST or not datagram.data:\n \n continue\n \n elif datagram.data[0] == self.RTMP_FUNC_REQUEST:\n \n if 0 in (rx_port.network_min, rx_port.network_max): continue\n if datagram.hop_count != 0: continue # we have to send responses out of the same port they came in, no routing\n response_data = struct.pack('>HBB', rx_port.network, 8, rx_port.node)\n if rx_port.extended_network:\n response_data += struct.pack('>HBHB', rx_port.network_min, 0x80, rx_port.network_max, self.RTMP_VERSION)\n router.reply(datagram, rx_port, self.RTMP_DDP_TYPE_DATA, response_data)\n \n elif datagram.data[0] in (self.RTMP_FUNC_RDR_SPLIT_HORIZON, self.RTMP_FUNC_RDR_NO_SPLIT_HORIZON):\n \n split_horizon = True if datagram.data[0] == self.RTMP_FUNC_RDR_SPLIT_HORIZON else False\n for datagram_data in self.make_routing_table_datagram_data(router, rx_port, split_horizon):\n router.reply(datagram, rx_port, self.RTMP_DDP_TYPE_DATA, datagram_data)\n \n self.queue.task_done()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "RtmpSendingService", "path": "tashrouter/service/rtmp/sending.py", "snippet": "class 
RtmpSendingService(Service, RtmpService):\n '''A Service which sends RTMP Datagrams containing the Router's RoutingTable to its Ports on a regular basis.'''\n \n DEFAULT_TIMEOUT = 10 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.queue = Queue()\n self.stop_flag = object()\n self.force_send_flag = object()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.queue.join()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n try:\n item = self.queue.get(timeout=self.timeout)\n except Empty:\n item = None\n if item is self.stop_flag: break\n for port in router.ports:\n if 0 in (port.node, port.network): continue\n for datagram_data in self.make_routing_table_datagram_data(router, port):\n port.send(0x0000, 0xFF, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=port.network,\n destination_node=0xFF,\n source_node=port.node,\n destination_socket=self.RTMP_SAS,\n source_socket=self.RTMP_SAS,\n ddp_type=self.RTMP_DDP_TYPE_DATA,\n data=datagram_data))\n if item is not None: self.queue.task_done()\n self.queue.task_done()\n \n def inbound(self, datagram, rx_port):\n pass\n \n def force_send(self):\n '''Force this service to immediately send an RTMP Datagram for testing purposes.'''\n self.queue.put(self.force_send_flag)\n self.queue.join()" }, { "identifier": "ZipRespondingService", "path": "tashrouter/service/zip/responding.py", "snippet": "class ZipRespondingService(Service, ZipService):\n '''A Service that implements Zone Information Protocol (ZIP).'''\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n self._pending_network_zone_name_set = {}\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _reply(self, router, datagram):\n \n if len(datagram.data) < 2: return\n func, count = struct.unpack('>BB', datagram.data[:2])\n data = datagram.data[2:]\n \n networks_and_zone_names = deque()\n while len(data) >= 3:\n network_min, zone_name_length = struct.unpack('>HB', data[:3])\n zone_name = data[3:3 + zone_name_length]\n if len(zone_name) != zone_name_length: break\n data = data[3 + zone_name_length:]\n if zone_name_length == 0: continue\n networks_and_zone_names.append((network_min, zone_name))\n if not networks_and_zone_names: return\n \n network_min_to_network_max = {}\n for entry in router.routing_table:\n network_min_to_network_max[entry.network_min] = entry.network_max\n \n if func == self.ZIP_FUNC_REPLY:\n for network_min, zone_name in networks_and_zone_names:\n try:\n network_max = network_min_to_network_max[network_min]\n except KeyError:\n logging.warning('%s ZIP reply refers to a network range (starting with %d) with which we are not familiar', str(router), \n network_min)\n else:\n try:\n router.zone_information_table.add_networks_to_zone(zone_name, network_min, network_max)\n except ValueError as e:\n logging.warning(\"%s ZIP reply couldn't be added to zone information table: %s\", str(router), e.args[0])\n elif func == self.ZIP_FUNC_EXT_REPLY:\n #TODO this code is fragile and I do not like it\n network_min = None\n for network_min, 
zone_name in networks_and_zone_names:\n if network_min not in self._pending_network_zone_name_set: self._pending_network_zone_name_set[network_min] = set()\n self._pending_network_zone_name_set[network_min].add(zone_name)\n if network_min is not None and len(self._pending_network_zone_name_set.get(network_min, ())) >= count and count >= 1:\n for zone_name in self._pending_network_zone_name_set.pop(network_min):\n try:\n network_max = network_min_to_network_max[network_min]\n except KeyError:\n logging.warning('%s ZIP reply refers to a network range (starting with %d) with which we are not familiar', str(router),\n network_min)\n else:\n try:\n router.zone_information_table.add_networks_to_zone(zone_name, network_min, network_max)\n except ValueError as e:\n logging.warning(\"%s ZIP reply couldn't be added to zone information table: %s\", str(router), e.args[0])\n \n @classmethod\n def _query(cls, router, datagram, rx_port):\n if len(datagram.data) < 4: return\n network_count = datagram.data[1]\n if len(datagram.data) != (network_count * 2) + 2: return\n # in imitation of AppleTalk Internet Router, we only respond with extended replies even if a regular reply would fit\n # we also give one list per requested network even if the requested networks are in the same range and the lists are the same;\n # that is, if the sender requests zones for networks 3 and 4 and there is a zones list for networks 3-5, we will reply with the\n # zone list for network 3 twice... seems silly, but this is how ATIR does it so *shrug*\n for network_idx in range(network_count):\n requested_network = struct.unpack('>H', datagram.data[(network_idx * 2) + 2:(network_idx * 2) + 4])[0]\n entry, _ = router.routing_table.get_by_network(requested_network)\n if entry is None: continue\n try:\n zone_names = router.zone_information_table.zones_in_network_range(entry.network_min)\n except ValueError:\n continue\n datagram_data = deque()\n datagram_data_length = 0\n for zone_name in chain(zone_names, (None,)):\n list_item = None if zone_name is None else struct.pack('>HB', entry.network_min, len(zone_name)) + zone_name\n if list_item is None or datagram_data_length + len(list_item) > Datagram.MAX_DATA_LENGTH - 2:\n router.reply(datagram, rx_port, cls.ZIP_DDP_TYPE, struct.pack('>BB', cls.ZIP_FUNC_EXT_REPLY,\n len(zone_names)) + b''.join(datagram_data))\n datagram_data = deque()\n datagram_data_length = 0\n if list_item is not None:\n datagram_data.append(list_item)\n datagram_data_length += len(list_item)\n \n @classmethod\n def _get_net_info(cls, router, datagram, rx_port):\n if 0 in (rx_port.network, rx_port.network_min, rx_port.network_max): return\n if len(datagram.data) < 7: return\n if datagram.data[1:6] != b'\\0\\0\\0\\0\\0': return\n given_zone_name = datagram.data[7:7 + datagram.data[6]]\n given_zone_name_ucase = ucase(given_zone_name)\n flags = cls.ZIP_GETNETINFO_ZONE_INVALID | cls.ZIP_GETNETINFO_ONLY_ONE_ZONE\n default_zone_name = None\n number_of_zones = 0\n multicast_address = b''\n try:\n zone_names = router.zone_information_table.zones_in_network_range(rx_port.network_min, rx_port.network_max)\n except ValueError as e:\n logging.warning(\"%s couldn't get zone names in port network range for GetNetInfo: %s\", router, e.args[0])\n return\n for zone_name in zone_names:\n number_of_zones += 1\n if default_zone_name is None:\n # zones_in_network_range returns the default zone first\n default_zone_name = zone_name\n multicast_address = rx_port.multicast_address(zone_name)\n if ucase(zone_name) == 
given_zone_name_ucase:\n flags &= ~cls.ZIP_GETNETINFO_ZONE_INVALID\n multicast_address = rx_port.multicast_address(zone_name)\n if number_of_zones > 1:\n flags &= ~cls.ZIP_GETNETINFO_ONLY_ONE_ZONE\n if not flags & cls.ZIP_GETNETINFO_ZONE_INVALID: break\n if number_of_zones == 0: return\n if not multicast_address: flags |= cls.ZIP_GETNETINFO_USE_BROADCAST\n reply_data = b''.join((\n struct.pack('>BBHHB', cls.ZIP_FUNC_GETNETINFO_REPLY, flags, rx_port.network_min, rx_port.network_max, len(given_zone_name)),\n given_zone_name,\n struct.pack('>B', len(multicast_address)),\n multicast_address,\n struct.pack('>B', len(default_zone_name)) if flags & cls.ZIP_GETNETINFO_ZONE_INVALID else b'',\n default_zone_name if flags & cls.ZIP_GETNETINFO_ZONE_INVALID else b''))\n router.reply(datagram, rx_port, cls.ZIP_DDP_TYPE, reply_data)\n \n @classmethod\n def _get_my_zone(cls, router, datagram, rx_port):\n _, _, tid, _, _, start_index = struct.unpack('>BBHBBH', datagram.data)\n if start_index != 0: return\n entry, _ = router.routing_table.get_by_network(datagram.source_network)\n if entry is None: return\n try:\n zone_name = next(iter(router.zone_information_table.zones_in_network_range(entry.network_min)), None)\n except ValueError:\n return\n if not zone_name: return\n router.reply(datagram, rx_port, cls.ATP_DDP_TYPE, struct.pack('>BBHBBHB',\n cls.ATP_FUNC_TRESP | cls.ATP_EOM,\n 0,\n tid,\n 0,\n 0,\n 1,\n len(zone_name)) + zone_name)\n \n @classmethod\n def _get_zone_list(cls, router, datagram, rx_port, local=False):\n _, _, tid, _, _, start_index = struct.unpack('>BBHBBH', datagram.data)\n if local:\n try:\n zone_iter = iter(router.zone_information_table.zones_in_network_range(rx_port.network_min, rx_port.network_max))\n except ValueError as e:\n logging.warning(\"%s couldn't get zone names in port network range for GetLocalZones: %s\", router, e.args[0])\n return\n else:\n zone_iter = iter(router.zone_information_table.zones())\n for _ in range(start_index - 1): next(zone_iter, None) # skip over start_index-1 entries (index is 1-relative)\n last_flag = 0\n zone_list = deque()\n num_zones = 0\n data_length = 8\n while zone_name := next(zone_iter, None):\n if data_length + 1 + len(zone_name) > Datagram.MAX_DATA_LENGTH: break\n zone_list.append(struct.pack('>B', len(zone_name)))\n zone_list.append(zone_name)\n num_zones += 1\n data_length += 1 + len(zone_name)\n else:\n last_flag = 1\n router.reply(datagram, rx_port, cls.ATP_DDP_TYPE, struct.pack('>BBHBBH',\n cls.ATP_FUNC_TRESP | cls.ATP_EOM,\n 0,\n tid,\n last_flag,\n 0,\n num_zones) + b''.join(zone_list))\n \n def _run(self, router):\n self.started_event.set()\n while True:\n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n if datagram.ddp_type == self.ZIP_DDP_TYPE:\n if not datagram.data: continue\n if datagram.data[0] in (self.ZIP_FUNC_REPLY, self.ZIP_FUNC_EXT_REPLY):\n self._reply(router, datagram)\n elif datagram.data[0] == self.ZIP_FUNC_QUERY:\n self._query(router, datagram, rx_port)\n elif datagram.data[0] == self.ZIP_FUNC_GETNETINFO_REQUEST:\n self._get_net_info(router, datagram, rx_port)\n elif datagram.ddp_type == self.ATP_DDP_TYPE:\n if len(datagram.data) != 8: continue\n control, bitmap, _, func, zero, _ = struct.unpack('>BBHBBH', datagram.data)\n if control != self.ATP_FUNC_TREQ or bitmap != 1 or zero != 0: continue\n if func == self.ZIP_ATP_FUNC_GETMYZONE:\n self._get_my_zone(router, datagram, rx_port)\n elif func == self.ZIP_ATP_FUNC_GETZONELIST:\n self._get_zone_list(router, datagram, rx_port, 
local=False)\n elif func == self.ZIP_ATP_FUNC_GETLOCALZONES:\n self._get_zone_list(router, datagram, rx_port, local=True)\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "ZipSendingService", "path": "tashrouter/service/zip/sending.py", "snippet": "class ZipSendingService(Service, ZipService):\n '''A Service which sends ZIP queries to fill out its router's Zone Information Table.'''\n \n DEFAULT_TIMEOUT = 10 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.stop_requested_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.stop_requested_event.set()\n self.stopped_event.wait()\n \n def _run(self, router):\n \n self.started_event.set()\n \n while True:\n \n if self.stop_requested_event.wait(timeout=self.timeout): break\n \n queries = {} # (port, network, node) -> network_mins\n for entry in router.routing_table:\n try:\n if next(iter(router.zone_information_table.zones_in_network_range(entry.network_min, entry.network_max)), None): continue\n except ValueError as e:\n logging.warning('%s apparent disjoin between routing table and zone information table: %s', router, e.args[0])\n continue\n if entry.distance == 0:\n key = (entry.port, 0x0000, 0xFF)\n else:\n key = (entry.port, entry.next_network, entry.next_node)\n if key not in queries: queries[key] = deque()\n queries[key].append(entry.network_min)\n \n for port_network_node, network_mins in queries.items():\n port, network, node = port_network_node\n if 0 in (port.node, port.network): continue\n datagram_data = deque()\n for network_min in chain(network_mins, (None,)):\n if network_min is None or len(datagram_data) * 2 + 4 > Datagram.MAX_DATA_LENGTH:\n datagram_data.appendleft(struct.pack('>BB', self.ZIP_FUNC_QUERY, len(datagram_data)))\n port.send(network, node, Datagram(hop_count=0,\n destination_network=network,\n source_network=port.network,\n destination_node=node,\n source_node=port.node,\n destination_socket=self.ZIP_SAS,\n source_socket=self.ZIP_SAS,\n ddp_type=self.ZIP_DDP_TYPE,\n data=b''.join(datagram_data)))\n if network_min is not None: datagram_data = deque((struct.pack('>H', network_min),))\n else:\n datagram_data.append(struct.pack('>H', network_min))\n \n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n pass" } ]
import logging from .routing_table import RoutingTable from .zone_information_table import ZoneInformationTable from ..datagram import Datagram from ..service.echo import EchoService from ..service.name_information import NameInformationService from ..service.routing_table_aging import RoutingTableAgingService from ..service.rtmp.responding import RtmpRespondingService from ..service.rtmp.sending import RtmpSendingService from ..service.zip.responding import ZipRespondingService from ..service.zip.sending import ZipSendingService
12099
'''The heart of this whole affair.''' class Router: '''A router, a device which sends Datagrams to Ports and runs Services.''' def __init__(self, short_str, ports): self._short_str = short_str self.ports = ports self._services = ( (EchoService.ECHO_SAS, EchoService()), (NameInformationService.NBP_SAS, NameInformationService()), (None, RoutingTableAgingService()), (RtmpRespondingService.RTMP_SAS, RtmpRespondingService()), (None, RtmpSendingService()), (ZipRespondingService.ZIP_SAS, ZipRespondingService()), (None, ZipSendingService()), ) self.zone_information_table = ZoneInformationTable(self) self._services_by_sas = {} for sas, service in self._services: if sas is not None: self._services_by_sas[sas] = service
'''The heart of this whole affair.''' class Router: '''A router, a device which sends Datagrams to Ports and runs Services.''' def __init__(self, short_str, ports): self._short_str = short_str self.ports = ports self._services = ( (EchoService.ECHO_SAS, EchoService()), (NameInformationService.NBP_SAS, NameInformationService()), (None, RoutingTableAgingService()), (RtmpRespondingService.RTMP_SAS, RtmpRespondingService()), (None, RtmpSendingService()), (ZipRespondingService.ZIP_SAS, ZipRespondingService()), (None, ZipSendingService()), ) self.zone_information_table = ZoneInformationTable(self) self._services_by_sas = {} for sas, service in self._services: if sas is not None: self._services_by_sas[sas] = service
self.routing_table = RoutingTable(self)
0
2023-12-02 15:17:07+00:00
16k
andryyy/ehlocomputer
models/listeners.py
[ { "identifier": "defaults", "path": "config/defaults.py", "snippet": "ACCEPT_LANGUAGES = [\"en\", \"de\"]\nMAX_HISTORIC_REVISIONS = 5\nWEBAUTHN_CHALLENGE_TIMEOUT = 30 # seconds\nPROXY_AUTH_TIMEOUT = 300 # seconds\nTABLE_PAGE_SIZE = 10\nTINYDB = {\n \"storage\": RedisLockMiddleware(JSONStorage),\n \"sort_keys\": True,\n \"indent\": 2,\n}\nPODMAN_BINARY = \"/usr/bin/podman\"\nTRUSTED_PROXIES = [\"127.0.0.1\", \"::1\"]" }, { "identifier": "lego", "path": "config/lego.py", "snippet": "DNS_PROVIDERS = {\n \"allinkl\": [\n {\"ALL_INKL_LOGIN\": \"KAS login\"},\n {\"ALL_INKL_PASSWORD\": \"KAS password\"},\n {\"ALL_INKL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ALL_INKL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ALL_INKL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"arvancloud\": [\n {\"ARVANCLOUD_API_KEY\": \"API key\"},\n {\"ARVANCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ARVANCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ARVANCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ARVANCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"autodns\": [\n {\"AUTODNS_API_PASSWORD\": \"User Password\"},\n {\"AUTODNS_API_USER\": \"Username\"},\n {\"AUTODNS_CONTEXT\": \"API context (4 for production, 1 for testing. Defaults to 4)\"},\n {\"AUTODNS_HTTP_TIMEOUT\": \"API request timeout, defaults to 30 seconds\"},\n {\"AUTODNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"AUTODNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"AUTODNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"azure\": [\n {\"AZURE_CLIENT_ID\": \"Client ID\"},\n {\"AZURE_CLIENT_SECRET\": \"Client secret\"},\n {\"AZURE_ENVIRONMENT\": \"Azure environment, one of: public, usgovernment, and china\"},\n {\"AZURE_RESOURCE_GROUP\": \"DNS zone resource group\"},\n {\"AZURE_SUBSCRIPTION_ID\": \"DNS zone subscription ID\"},\n {\"AZURE_TENANT_ID\": \"Tenant ID\"},\n {\"AZURE_METADATA_ENDPOINT\": \"Metadata Service endpoint URL\"},\n {\"AZURE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"AZURE_PRIVATE_ZONE\": \"Set to true to use Azure Private DNS Zones and not public\"},\n {\"AZURE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"AZURE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"AZURE_ZONE_NAME\": \"Zone name to use inside Azure DNS service to add the TXT record in\"},\n ],\n \"bindman\": [\n {\"BINDMAN_MANAGER_ADDRESS\": \"The server URL, should have scheme, hostname, and port (if required) of the Bindman-DNS Manager server\"},\n {\"BINDMAN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BINDMAN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BINDMAN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"bluecat\": [\n {\"BLUECAT_CONFIG_NAME\": \"Configuration name\"},\n {\"BLUECAT_DNS_VIEW\": \"External DNS View Name\"},\n {\"BLUECAT_PASSWORD\": \"API password\"},\n {\"BLUECAT_SERVER_URL\": \"The server URL, should have scheme, hostname, and port (if required) of the authoritative Bluecat BAM serve\"},\n {\"BLUECAT_USER_NAME\": \"API username\"},\n {\"BLUECAT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BLUECAT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BLUECAT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BLUECAT_TTL\": \"The TTL of the TXT record used 
for the DNS challenge\"},\n ],\n \"brandit\": [\n {\"BRANDIT_API_KEY\": \"The API key\"},\n {\"BRANDIT_API_USERNAME\": \"The API username\"},\n {\"BRANDIT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BRANDIT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BRANDIT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BRANDIT_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"bunny\": [\n {\"BUNNY_API_KEY\": \"API key\"},\n {\"BUNNY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BUNNY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BUNNY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"checkdomain\": [\n {\"CHECKDOMAIN_TOKEN\": \"API token\"},\n {\"CHECKDOMAIN_HTTP_TIMEOUT\": \"API request timeout, defaults to 30 seconds\"},\n {\"CHECKDOMAIN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CHECKDOMAIN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CHECKDOMAIN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"civo\": [\n {\"CIVO_TOKEN\": \"Authentication token\"},\n {\"CIVO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CIVO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CIVO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"clouddns\": [\n {\"CLOUDDNS_CLIENT_ID\": \"Client ID\"},\n {\"CLOUDDNS_EMAIL\": \"Account email\"},\n {\"CLOUDDNS_PASSWORD\": \"Account password\"},\n {\"CLOUDDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudflare\": [\n {\"CLOUDFLARE_API_KEY\": \"Alias to CF_API_KEY\"},\n {\"CLOUDFLARE_DNS_API_TOKEN\": \"Alias to CF_DNS_API_TOKEN\"},\n {\"CLOUDFLARE_EMAIL\": \"Alias to CF_API_EMAIL\"},\n {\"CLOUDFLARE_ZONE_API_TOKEN\": \"Alias to CF_ZONE_API_TOKEN\"},\n {\"CLOUDFLARE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDFLARE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDFLARE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDFLARE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudns\": [\n {\"CLOUDNS_AUTH_ID\": \"The API user ID\"},\n {\"CLOUDNS_AUTH_PASSWORD\": \"The password for API user ID\"},\n {\"CLOUDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDNS_SUB_AUTH_ID\": \"The API sub user ID\"},\n {\"CLOUDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudru\": [\n {\"CLOUDRU_KEY_ID\": \"Key ID (login)\"},\n {\"CLOUDRU_SECRET\": \"Key Secret\"},\n {\"CLOUDRU_SERVICE_INSTANCE_ID\": \"Service Instance ID (parentId)\"},\n {\"CLOUDRU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDRU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDRU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDRU_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"CLOUDRU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudxns\": [\n {\"CLOUDXNS_API_KEY\": \"The API key\"},\n {\"CLOUDXNS_SECRET_KEY\": \"The API 
secret key\"},\n {\"CLOUDXNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDXNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDXNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDXNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"conoha\": [\n {\"CONOHA_API_PASSWORD\": \"The API password\"},\n {\"CONOHA_API_USERNAME\": \"The API username\"},\n {\"CONOHA_TENANT_ID\": \"Tenant ID\"},\n {\"CONOHA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CONOHA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CONOHA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CONOHA_REGION\": \"The region\"},\n {\"CONOHA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"constellix\": [\n {\"CONSTELLIX_API_KEY\": \"User API key\"},\n {\"CONSTELLIX_SECRET_KEY\": \"User secret key\"},\n {\"CONSTELLIX_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CONSTELLIX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CONSTELLIX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CONSTELLIX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"derak\": [\n {\"DERAK_API_KEY\": \"The API key\"},\n {\"DERAK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DERAK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DERAK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DERAK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"DERAK_WEBSITE_ID\": \"Force the zone/website ID\"},\n ],\n \"desec\": [\n {\"DESEC_TOKEN\": \"Domain token\"},\n {\"DESEC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DESEC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DESEC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DESEC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"designate\": [\n {\"DESIGNATE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DESIGNATE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DESIGNATE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnshomede\": [\n {\"DNSHOMEDE_CREDENTIALS\": \"Comma-separated list of domain:password credential pairs\"}\n ],\n \"dnsimple\": [\n {\"DNSIMPLE_OAUTH_TOKEN\": \"OAuth token\"},\n {\"DNSIMPLE_BASE_URL\": \"API endpoint URL\"},\n {\"DNSIMPLE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSIMPLE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSIMPLE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnsmadeeasy\": [\n {\"DNSMADEEASY_API_KEY\": \"The API key\"},\n {\"DNSMADEEASY_API_SECRET\": \"The API Secret key\"},\n {\"DNSMADEEASY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DNSMADEEASY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSMADEEASY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSMADEEASY_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"DNSMADEEASY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnspod\": [\n {\"DNSPOD_API_KEY\": \"The user token\"},\n {\"DNSPOD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DNSPOD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSPOD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSPOD_TTL\": \"The TTL of the TXT record used for the DNS 
challenge\"},\n ],\n \"dode\": [\n {\"DODE_TOKEN\": \"API token\"},\n {\"DODE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DODE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DODE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DODE_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"DODE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"domeneshop\": [\n {\"DOMENESHOP_API_SECRET\": \"API secret\"},\n {\"DOMENESHOP_API_TOKEN\": \"API token\"},\n {\"DOMENESHOP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DOMENESHOP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DOMENESHOP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"dreamhost\": [\n {\"DREAMHOST_API_KEY\": \"The API key\"},\n {\"DREAMHOST_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DREAMHOST_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DREAMHOST_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DREAMHOST_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"duckdns\": [\n {\"DUCKDNS_TOKEN\": \"Account token\"},\n {\"DUCKDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DUCKDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DUCKDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DUCKDNS_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"DUCKDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dyn\": [\n {\"DYN_CUSTOMER_NAME\": \"Customer name\"},\n {\"DYN_PASSWORD\": \"Password\"},\n {\"DYN_USER_NAME\": \"User name\"},\n {\"DYN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DYN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DYN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DYN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dynu\": [\n {\"DYNU_API_KEY\": \"API key\"},\n {\"DYNU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DYNU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DYNU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DYNU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"easydns\": [\n {\"EASYDNS_KEY\": \"API Key\"},\n {\"EASYDNS_TOKEN\": \"API Token\"},\n {\"EASYDNS_ENDPOINT\": \"The endpoint URL of the API Server\"},\n {\"EASYDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EASYDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EASYDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EASYDNS_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"EASYDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"efficientip\": [\n {\"EFFICIENTIP_DNS_NAME\": \"DNS name (ex: dns.smart)\"},\n {\"EFFICIENTIP_HOSTNAME\": \"Hostname (ex: foo.example.com)\"},\n {\"EFFICIENTIP_PASSWORD\": \"Password\"},\n {\"EFFICIENTIP_USERNAME\": \"Username\"},\n {\"EFFICIENTIP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EFFICIENTIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EFFICIENTIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EFFICIENTIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"EFFICIENTIP_VIEW_NAME\": \"View name (ex: external)\"},\n ],\n \"epik\": [\n {\"EPIK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EPIK_POLLING_INTERVAL\": \"Time between DNS 
propagation check\"},\n {\"EPIK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EPIK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"exoscale\": [\n {\"EXOSCALE_API_KEY\": \"API key\"},\n {\"EXOSCALE_API_SECRET\": \"API secret\"},\n {\"EXOSCALE_API_ZONE\": \"API zone\"},\n {\"EXOSCALE_ENDPOINT\": \"API endpoint URL\"},\n {\"EXOSCALE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EXOSCALE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EXOSCALE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EXOSCALE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"freemyip\": [\n {\"FREEMYIP_TOKEN\": \"Account token\"},\n {\"FREEMYIP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"FREEMYIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"FREEMYIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"FREEMYIP_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"FREEMYIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gandi\": [\n {\"GANDI_API_KEY\": \"API key\"},\n {\"GANDI_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GANDI_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GANDI_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GANDI_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gandiv5\": [\n {\"GANDIV5_API_KEY\": \"API key\"},\n {\"GANDIV5_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GANDIV5_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GANDIV5_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GANDIV5_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gcore\": [\n {\"GCORE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GCORE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GCORE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GCORE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"glesys\": [\n {\"GLESYS_API_KEY\": \"API key\"},\n {\"GLESYS_API_USER\": \"API user\"},\n {\"GLESYS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GLESYS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GLESYS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GLESYS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"godaddy\": [\n {\"GODADDY_API_KEY\": \"API key\"},\n {\"GODADDY_API_SECRET\": \"API secret\"},\n {\"GODADDY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GODADDY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GODADDY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GODADDY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"hetzner\": [\n {\"HETZNER_API_KEY\": \"API key\"},\n {\"HETZNER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HETZNER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HETZNER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HETZNER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"hostingde\": [\n {\"HOSTINGDE_API_KEY\": \"API key\"},\n {\"HOSTINGDE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HOSTINGDE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HOSTINGDE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HOSTINGDE_TTL\": \"The TTL of the TXT 
record used for the DNS challenge\"},\n {\"HOSTINGDE_ZONE_NAME\": \"Zone name in ACE format\"},\n ],\n \"hosttech\": [\n {\"HOSTTECH_API_KEY\": \"API login\"},\n {\"HOSTTECH_PASSWORD\": \"API password\"},\n {\"HOSTTECH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HOSTTECH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HOSTTECH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HOSTTECH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"httpreq\": [\n {\"HTTPREQ_ENDPOINT\": \"The URL of the server\"},\n {\"HTTPREQ_MODE\": \"'RAW', none\"},\n {\"HTTPREQ_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HTTPREQ_PASSWORD\": \"Basic authentication password\"},\n {\"HTTPREQ_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HTTPREQ_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HTTPREQ_USERNAME\": \"Basic authentication username\"},\n ],\n \"hurricane\": [\n {\"HURRICANE_TOKENS\": \"TXT record names and tokens\"}\n ],\n \"hyperone\": [\n {\"HYPERONE_LOCATION_ID\": \"Specifies location (region) to be used in API calls. (default pl-waw-1)\"},\n {\"HYPERONE_PASSPORT_LOCATION\": \"Allows to pass custom passport file location (default ~/.h1/passport.json)\"},\n {\"HYPERONE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HYPERONE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HYPERONE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"iij\": [\n {\"IIJ_API_ACCESS_KEY\": \"API access key\"},\n {\"IIJ_API_SECRET_KEY\": \"API secret key\"},\n {\"IIJ_DO_SERVICE_CODE\": \"DO service code\"},\n {\"IIJ_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IIJ_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IIJ_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"IIJ_DPF_API_TOKEN\": \"API token\"},\n {\"IIJ_DPF_DPM_SERVICE_CODE\": \"IIJ Managed DNS Service's service code\"},\n {\"IIJ_DPF_POLLING_INTERVAL\": \"Time between DNS propagation check, defaults to 5 second\"},\n {\"IIJ_DPF_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation, defaults to 660 second\"},\n {\"IIJ_DPF_TTL\": \"The TTL of the TXT record used for the DNS challenge, default to 300\"},\n ],\n \"infoblox\": [\n {\"INFOBLOX_HOST\": \"Host URI\"},\n {\"INFOBLOX_PASSWORD\": \"Account Password\"},\n {\"INFOBLOX_USERNAME\": \"Account Username\"},\n {\"INFOBLOX_DNS_VIEW\": \"The view for the TXT records, default: External\"},\n {\"INFOBLOX_HTTP_TIMEOUT\": \"HTTP request timeout\"},\n {\"INFOBLOX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INFOBLOX_PORT\": \"The port for the infoblox grid manager, default: 443\"},\n {\"INFOBLOX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"INFOBLOX_SSL_VERIFY\": \"Whether or not to verify the TLS certificate, default: true\"},\n {\"INFOBLOX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"INFOBLOX_WAPI_VERSION\": \"The version of WAPI being used, default: 2.11\"},\n ],\n \"infomaniak\": [\n {\"INFOMANIAK_ACCESS_TOKEN\": \"Access token\"},\n {\"INFOMANIAK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"INFOMANIAK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INFOMANIAK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"INFOMANIAK_TTL\": \"The TTL of the TXT record used for the DNS challenge in seconds\"},\n ],\n \"inwx\": [\n {\"INWX_PASSWORD\": 
\"Password\"},\n {\"INWX_USERNAME\": \"Username\"},\n {\"INWX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INWX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation (default 360s)\"},\n {\"INWX_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"INWX_SHARED_SECRET\": \"shared secret related to 2FA\"},\n {\"INWX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ionos\": [\n {\"IONOS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IONOS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IONOS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IONOS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ipv64\": [\n {\"IPV64_API_KEY\": \"Account API Key\"},\n {\"IPV64_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IPV64_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IPV64_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IPV64_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"IPV64_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"iwantmyname\": [\n {\"IWANTMYNAME_PASSWORD\": \"API password\"},\n {\"IWANTMYNAME_USERNAME\": \"API username\"},\n {\"IWANTMYNAME_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IWANTMYNAME_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IWANTMYNAME_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IWANTMYNAME_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"joker\": [\n {\"JOKER_API_KEY\": \"API key (only with DMAPI mode)\"},\n {\"JOKER_API_MODE\": \"'DMAPI' or 'SVC'. DMAPI is for resellers accounts. (Default: DMAPI)\"},\n {\"JOKER_PASSWORD\": \"Joker.com password\"},\n {\"JOKER_USERNAME\": \"Joker.com username\"},\n {\"JOKER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"JOKER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"JOKER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"JOKER_SEQUENCE_INTERVAL\": \"Time between sequential requests (only with 'SVC' mode)\"},\n {\"JOKER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"liara\": [\n {\"LIARA_API_KEY\": \"The API key\"},\n {\"LIARA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LIARA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LIARA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LIARA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"lightsail\": [\n {\"LIGHTSAIL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LIGHTSAIL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"linode\": [\n {\"LINODE_TOKEN\": \"API token\"},\n {\"LINODE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LINODE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LINODE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LINODE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"loopia\": [\n {\"LOOPIA_API_PASSWORD\": \"API password\"},\n {\"LOOPIA_API_USER\": \"API username\"},\n {\"LOOPIA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LOOPIA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LOOPIA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LOOPIA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"luadns\": [\n {\"LUADNS_API_TOKEN\": \"API token\"},\n 
{\"LUADNS_API_USERNAME\": \"Username (your email)\"},\n {\"LUADNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LUADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LUADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LUADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"metaname\": [\n {\"METANAME_ACCOUNT_REFERENCE\": \"The four-digit reference of a Metaname account\"},\n {\"METANAME_API_KEY\": \"API Key\"},\n {\"METANAME_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"METANAME_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"METANAME_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"mydnsjp\": [\n {\"MYDNSJP_MASTER_ID\": \"Master ID\"},\n {\"MYDNSJP_PASSWORD\": \"Password\"},\n {\"MYDNSJP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"MYDNSJP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"MYDNSJP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"MYDNSJP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"mythicbeasts\": [\n {\"MYTHICBEASTS_PASSWORD\": \"Password\"},\n {\"MYTHICBEASTS_USERNAME\": \"User name\"},\n {\"MYTHICBEASTS_API_ENDPOINT\": \"The endpoint for the API (must implement v2)\"},\n {\"MYTHICBEASTS_AUTH_API_ENDPOINT\": \"The endpoint for Mythic Beasts' Authentication\"},\n {\"MYTHICBEASTS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"MYTHICBEASTS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"MYTHICBEASTS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"MYTHICBEASTS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"namecheap\": [\n {\"NAMECHEAP_API_KEY\": \"API key\"},\n {\"NAMECHEAP_API_USER\": \"API user\"},\n {\"NAMECHEAP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NAMECHEAP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NAMECHEAP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NAMECHEAP_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"NAMECHEAP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"namesilo\": [\n {\"NAMESILO_API_KEY\": \"Client ID\"},\n {\"NAMESILO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NAMESILO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation, it is better to set larger than 15m\"},\n {\"NAMESILO_TTL\": \"The TTL of the TXT record used for the DNS challenge, should be in [3600, 2592000]\"},\n ],\n \"nearlyfreespeech\": [\n {\"NEARLYFREESPEECH_API_KEY\": \"API Key for API requests\"},\n {\"NEARLYFREESPEECH_LOGIN\": \"Username for API requests\"},\n {\"NEARLYFREESPEECH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NEARLYFREESPEECH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NEARLYFREESPEECH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NEARLYFREESPEECH_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"NEARLYFREESPEECH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"netcup\": [\n {\"NETCUP_API_KEY\": \"API key\"},\n {\"NETCUP_API_PASSWORD\": \"API password\"},\n {\"NETCUP_CUSTOMER_NUMBER\": \"Customer number\"},\n {\"NETCUP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NETCUP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NETCUP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NETCUP_TTL\": \"The TTL of the TXT record used 
for the DNS challenge\"},\n ],\n \"netlify\": [\n {\"NETLIFY_TOKEN\": \"Token\"},\n {\"NETLIFY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NETLIFY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NETLIFY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NETLIFY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nicmanager\": [\n {\"NICMANAGER_API_EMAIL\": \"Email-based login\"},\n {\"NICMANAGER_API_LOGIN\": \"Login, used for Username-based login\"},\n {\"NICMANAGER_API_PASSWORD\": \"Password, always required\"},\n {\"NICMANAGER_API_USERNAME\": \"Username, used for Username-based login\"},\n {\"NICMANAGER_API_MODE\": \"mode: 'anycast' or 'zone' (default: 'anycast')\"},\n {\"NICMANAGER_API_OTP\": \"TOTP Secret (optional)\"},\n {\"NICMANAGER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NICMANAGER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NICMANAGER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NICMANAGER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nifcloud\": [\n {\"NIFCLOUD_ACCESS_KEY_ID\": \"Access key\"},\n {\"NIFCLOUD_SECRET_ACCESS_KEY\": \"Secret access key\"},\n {\"NIFCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NIFCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NIFCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NIFCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"njalla\": [\n {\"NJALLA_TOKEN\": \"API token\"},\n {\"NJALLA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NJALLA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NJALLA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NJALLA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nodion\": [\n {\"NODION_API_TOKEN\": \"The API token\"},\n {\"NODION_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NODION_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NODION_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NODION_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ns1\": [\n {\"NS1_API_KEY\": \"API key\"},\n {\"NS1_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NS1_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NS1_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NS1_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"otc\": [\n {\"OTC_DOMAIN_NAME\": \"Domain name\"},\n {\"OTC_IDENTITY_ENDPOINT\": \"Identity endpoint URL\"},\n {\"OTC_PASSWORD\": \"Password\"},\n {\"OTC_PROJECT_NAME\": \"Project name\"},\n {\"OTC_USER_NAME\": \"User name\"},\n {\"OTC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"OTC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"OTC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"OTC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ovh\": [\n {\"OVH_APPLICATION_KEY\": \"Application key\"},\n {\"OVH_APPLICATION_SECRET\": \"Application secret\"},\n {\"OVH_CONSUMER_KEY\": \"Consumer key\"},\n {\"OVH_ENDPOINT\": \"Endpoint URL (ovh-eu or ovh-ca)\"},\n {\"OVH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"OVH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"OVH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"OVH_TTL\": \"The TTL of the TXT record used for the DNS 
challenge\"},\n ],\n \"pdns\": [\n {\"PDNS_API_KEY\": \"API key\"},\n {\"PDNS_API_URL\": \"API URL\"},\n {\"PDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PDNS_SERVER_NAME\": \"Name of the server in the URL, 'localhost' by default\"},\n {\"PDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"plesk\": [\n {\"PLESK_PASSWORD\": \"API password\"},\n {\"PLESK_USERNAME\": \"API username\"},\n {\"PLESK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PLESK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PLESK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PLESK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"porkbun\": [\n {\"PORKBUN_API_KEY\": \"API key\"},\n {\"PORKBUN_SECRET_API_KEY\": \"secret API key\"},\n {\"PORKBUN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PORKBUN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PORKBUN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PORKBUN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rackspace\": [\n {\"RACKSPACE_API_KEY\": \"API key\"},\n {\"RACKSPACE_USER\": \"API user\"},\n {\"RACKSPACE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RACKSPACE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RACKSPACE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RACKSPACE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rcodezero\": [\n {\"RCODEZERO_API_TOKEN\": \"API token\"},\n {\"RCODEZERO_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RCODEZERO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RCODEZERO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RCODEZERO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"regru\": [\n {\"REGRU_PASSWORD\": \"API password\"},\n {\"REGRU_USERNAME\": \"API username\"},\n {\"REGRU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"REGRU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"REGRU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"REGRU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rfc2136\": [\n {\"RFC2136_NAMESERVER\": \"Network address in the form 'host' or 'host:port'\"},\n {\"RFC2136_TSIG_KEY\": \"Name of the secret key as defined in DNS server configuration. To disable TSIG authentication, leave the 'RFC2136_TSIG*' variables unset.\"},\n {\"RFC2136_TSIG_SECRET\": \"Secret key payload. 
To disable TSIG authentication, leave the' RFC2136_TSIG*' variables unset.\"},\n {\"RFC2136_DNS_TIMEOUT\": \"API request timeout\"},\n {\"RFC2136_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RFC2136_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RFC2136_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"RFC2136_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rimuhosting\": [\n {\"RIMUHOSTING_API_KEY\": \"User API key\"},\n {\"RIMUHOSTING_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RIMUHOSTING_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RIMUHOSTING_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RIMUHOSTING_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"safedns\": [\n {\"SAFEDNS_AUTH_TOKEN\": \"Authentication token\"},\n {\"SAFEDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SAFEDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SAFEDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SAFEDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"sakuracloud\": [\n {\"SAKURACLOUD_ACCESS_TOKEN\": \"Access token\"},\n {\"SAKURACLOUD_ACCESS_TOKEN_SECRET\": \"Access token secret\"},\n {\"SAKURACLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SAKURACLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SAKURACLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SAKURACLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"scaleway\": [\n {\"SCALEWAY_API_TOKEN\": \"API token\"},\n {\"SCALEWAY_PROJECT_ID\": \"Project to use (optional)\"},\n {\"SCALEWAY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SCALEWAY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SCALEWAY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"selectel\": [\n {\"SELECTEL_API_TOKEN\": \"API token\"},\n {\"SELECTEL_BASE_URL\": \"API endpoint URL\"},\n {\"SELECTEL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SELECTEL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SELECTEL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SELECTEL_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"servercow\": [\n {\"SERVERCOW_PASSWORD\": \"API password\"},\n {\"SERVERCOW_USERNAME\": \"API username\"},\n {\"SERVERCOW_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SERVERCOW_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SERVERCOW_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SERVERCOW_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"simply\": [\n {\"SIMPLY_ACCOUNT_NAME\": \"Account name\"},\n {\"SIMPLY_API_KEY\": \"API key\"},\n {\"SIMPLY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SIMPLY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SIMPLY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SIMPLY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"sonic\": [\n {\"SONIC_API_KEY\": \"API Key\"},\n {\"SONIC_USER_ID\": \"User ID\"},\n {\"SONIC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SONIC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SONIC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SONIC_SEQUENCE_INTERVAL\": 
\"Time between sequential requests\"},\n {\"SONIC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"stackpath\": [\n {\"STACKPATH_CLIENT_ID\": \"Client ID\"},\n {\"STACKPATH_CLIENT_SECRET\": \"Client secret\"},\n {\"STACKPATH_STACK_ID\": \"Stack ID\"},\n {\"STACKPATH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"STACKPATH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"STACKPATH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"tencentcloud\": [\n {\"TENCENTCLOUD_SECRET_ID\": \"Access key ID\"},\n {\"TENCENTCLOUD_SECRET_KEY\": \"Access Key secret\"},\n {\"TENCENTCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"TENCENTCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"TENCENTCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"TENCENTCLOUD_REGION\": \"Region\"},\n {\"TENCENTCLOUD_SESSION_TOKEN\": \"Access Key token\"},\n {\"TENCENTCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"transip\": [\n {\"TRANSIP_ACCOUNT_NAME\": \"Account name\"},\n {\"TRANSIP_PRIVATE_KEY_PATH\": \"Private key path\"},\n {\"TRANSIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"TRANSIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"TRANSIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ultradns\": [\n {\"ULTRADNS_PASSWORD\": \"API Password\"},\n {\"ULTRADNS_USERNAME\": \"API Username\"},\n {\"ULTRADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ULTRADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ULTRADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vegadns\": [\n {\"VEGADNS_URL\": \"API endpoint URL\"},\n {\"VEGADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VEGADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VEGADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vercel\": [\n {\"VERCEL_API_TOKEN\": \"Authentication token\"},\n {\"VERCEL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VERCEL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VERCEL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VERCEL_TEAM_ID\": \"Team ID (ex: team_xxxxxxxxxxxxxxxxxxxxxxxx)\"},\n {\"VERCEL_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"versio\": [\n {\"VERSIO_PASSWORD\": \"Basic authentication password\"},\n {\"VERSIO_USERNAME\": \"Basic authentication username\"},\n {\"VERSIO_ENDPOINT\": \"The endpoint URL of the API Server\"},\n {\"VERSIO_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VERSIO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VERSIO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VERSIO_SEQUENCE_INTERVAL\": \"Time between sequential requests, default 60s\"},\n {\"VERSIO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vinyldns\": [\n {\"VINYLDNS_ACCESS_KEY\": \"The VinylDNS API key\"},\n {\"VINYLDNS_HOST\": \"The VinylDNS API URL\"},\n {\"VINYLDNS_SECRET_KEY\": \"The VinylDNS API Secret key\"},\n {\"VINYLDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VINYLDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VINYLDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vscale\": [\n 
{\"VSCALE_API_TOKEN\": \"API token\"},\n {\"VSCALE_BASE_URL\": \"API endpoint URL\"},\n {\"VSCALE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VSCALE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VSCALE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VSCALE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vultr\": [\n {\"VULTR_API_KEY\": \"API key\"},\n {\"VULTR_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VULTR_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VULTR_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VULTR_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"websupport\": [\n {\"WEBSUPPORT_API_KEY\": \"API key\"},\n {\"WEBSUPPORT_SECRET\": \"API secret\"},\n {\"WEBSUPPORT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"WEBSUPPORT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"WEBSUPPORT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"WEBSUPPORT_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"WEBSUPPORT_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"wedos\": [\n {\"WEDOS_USERNAME\": \"Username is the same as for the admin account\"},\n {\"WEDOS_WAPI_PASSWORD\": \"Password needs to be generated and IP allowed in the admin interface\"},\n {\"WEDOS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"WEDOS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"WEDOS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"WEDOS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"zoneee\": [\n {\"ZONEEE_API_KEY\": \"API key\"},\n {\"ZONEEE_API_USER\": \"API user\"},\n {\"ZONEEE_ENDPOINT\": \"API endpoint URL\"},\n {\"ZONEEE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ZONEEE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ZONEEE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ZONEEE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"zonomi\": [\n {\"ZONOMI_API_KEY\": \"User API key\"},\n {\"ZONOMI_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ZONOMI_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ZONOMI_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ZONOMI_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n}" }, { "identifier": "utc_now_as_str", "path": "utils/helpers.py", "snippet": "def utc_now_as_str():\n return datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S%z\")" }, { "identifier": "ensure_list", "path": "utils/helpers.py", "snippet": "@validate_call\ndef ensure_list(s: str | list[str] | None) -> list:\n if s:\n if isinstance(s, str):\n return [s]\n if isinstance(s, list):\n return s\n return []" }, { "identifier": "to_unique_sorted_str_list", "path": "utils/helpers.py", "snippet": "@validate_call\ndef to_unique_sorted_str_list(l: list[str]) -> list:\n _l = [x for x in set(l) if x != \"\"]\n return sorted(_l, key=lambda x: str(x))" }, { "identifier": "get_validated_fqdn", "path": "utils/helpers.py", "snippet": "@validate_call\ndef get_validated_fqdn(hostname: str) -> str:\n regex = re.compile(\n r\"^((?![-])[-A-Z\\d]{1,63}(?<!-)[.])*(?!-)[-A-Z\\d]{1,63}(?<!-)?$\", re.IGNORECASE\n )\n if len(hostname) > 253:\n raise ValueError(f\"{hostname} is too long\")\n if regex.match(hostname):\n return hostname\n else:\n raise ValueError(f\"{hostname} is not a valid 
FQDN\")" }, { "identifier": "flatten", "path": "utils/helpers.py", "snippet": "@validate_call\ndef flatten(l: list[list]):\n return [i for sub_list in l for i in sub_list]" } ]
import json import os import re import uuid from config import defaults from config import lego from config.database import * from email_validator import validate_email from pydantic import ( AfterValidator, BaseModel, EmailStr, Field, FilePath, HttpUrl, field_validator, model_validator, validator, ) from pydantic.networks import IPv4Address, IPv6Address from typing import Annotated, Any, Literal from . import ( utc_now_as_str, ensure_list, to_unique_sorted_str_list, get_validated_fqdn, flatten, )
12793
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = [] created: Annotated[str, Field(default_factory=utc_now_as_str)] updated: Annotated[str, Field(default_factory=utc_now_as_str)] class ListenerLegoConfig(BaseModel): lego_provider: str acme_terms_agreed: Literal[True, "true"] provider_config: dict acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))] acme_email: EmailStr key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048" domains: str @model_validator(mode="before") @classmethod def check_lego(self, data: Any) -> Any: if data.get("lego_provider") not in lego.LEGO_DNS_PROVIDERS.keys(): raise ValueError( f"Value {data.get('lego_provider')} is not a lego provider" ) _envs_available = flatten( [ list(p.keys()) for p in lego.LEGO_DNS_PROVIDERS.get(data.get("lego_provider"), []) ] ) for _k, _v in data.get("provider_config").items(): if _k not in _envs_available: raise ValueError( f"{_k} is not a valid environment variable for the given lego DNS client" ) if not isinstance(_v, str): raise ValueError(f"Value of {_k} is not a string") return data class ListenerServerListener(BaseModel): hostname: Annotated[ str,
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = [] created: Annotated[str, Field(default_factory=utc_now_as_str)] updated: Annotated[str, Field(default_factory=utc_now_as_str)] class ListenerLegoConfig(BaseModel): lego_provider: str acme_terms_agreed: Literal[True, "true"] provider_config: dict acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))] acme_email: EmailStr key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048" domains: str @model_validator(mode="before") @classmethod def check_lego(self, data: Any) -> Any: if data.get("lego_provider") not in lego.LEGO_DNS_PROVIDERS.keys(): raise ValueError( f"Value {data.get('lego_provider')} is not a lego provider" ) _envs_available = flatten( [ list(p.keys()) for p in lego.LEGO_DNS_PROVIDERS.get(data.get("lego_provider"), []) ] ) for _k, _v in data.get("provider_config").items(): if _k not in _envs_available: raise ValueError( f"{_k} is not a valid environment variable for the given lego DNS client" ) if not isinstance(_v, str): raise ValueError(f"Value of {_k} is not a string") return data class ListenerServerListener(BaseModel): hostname: Annotated[ str,
AfterValidator(lambda x: get_validated_fqdn(x)),
5
2023-12-01 08:36:45+00:00
16k
fzmi/ubdd
models/dino/models/dino/dino.py
[ { "identifier": "box_ops", "path": "models/dino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "models/dino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "nested_tensor_from_tensor_list", "path": "models/dino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "models/dino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, 
target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "models/dino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "models/dino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "models/dino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "models/dino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "build_backbone", "path": "models/dino/models/dino/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n 
parameter.requires_grad_(False)\n break\n if \"backbone_dir\" in args:\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model" }, { "identifier": "build_matcher", "path": "models/dino/models/dino/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" }, { "identifier": "DETRsegm", "path": "models/dino/models/dino/segmentation.py", "snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": 
outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out" }, { "identifier": "PostProcessPanoptic", "path": "models/dino/models/dino/segmentation.py", "snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. 
If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n 
predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds" }, { "identifier": "PostProcessSegm", "path": "models/dino/models/dino/segmentation.py", "snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results" }, { "identifier": "dice_loss", "path": "models/dino/models/dino/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "build_deformable_transformer", "path": "models/dino/models/dino/deformable_transformer.py", "snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n 
two_stage_pat_embed=args.two_stage_pat_embed,\n two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )" }, { "identifier": "sigmoid_focal_loss", "path": "models/dino/models/dino/utils.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "MLP", "path": "models/dino/models/dino/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/dino/models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "prepare_for_cdn", "path": "models/dino/models/dino/dn_components.py", "snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n 
dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 
1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta" }, { "identifier": "dn_post_process", "path": "models/dino/models/dino/dn_components.py", "snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord" } ]
import copy
import math
import torch
import torch.nn.functional as F
from typing import List
from torch import nn
from torchvision.ops.boxes import nms

from models.dino.util import box_ops
from models.dino.util.misc import (NestedTensor, nested_tensor_from_tensor_list,
                                   accuracy, get_world_size, interpolate,
                                   is_dist_avail_and_initialized, inverse_sigmoid)

from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss)
from .deformable_transformer import build_deformable_transformer
from .utils import sigmoid_focal_loss, MLP
from ..registry import MODULE_BUILD_FUNCS
from .dn_components import prepare_for_cdn, dn_post_process
11,759
if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) if self.random_refpoints_xy: self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False if self.fix_refpoints_hw > 0: print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw)) assert self.random_refpoints_xy self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:]) self.refpoint_embed.weight.data[:, 2:].requires_grad = False elif int(self.fix_refpoints_hw) == -1: pass elif int(self.fix_refpoints_hw) == -2: print('learn a shared h and w') assert self.random_refpoints_xy self.refpoint_embed = nn.Embedding(use_num_queries, 2) self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, 
:2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False self.hw_embed = nn.Embedding(1, 1) else: raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw)) def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0] += self.label_enc.weight[0,0]*0.0 # deformable-detr-like anchor update # reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = layer_outputs_unsig.sigmoid() outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # 
for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid() layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. 
focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
if is_dist_avail_and_initialized():
6
2023-12-04 00:27:58+00:00
16k
girgle/DouZero_For_New_HLDDZ
GOOD.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n self.Interrupt = False\n self.RealRate = (1440, 810)\n self.GetZoomRate()\n for file in os.listdir(\"./pics\"):\n info = file.split(\".\")\n if info[1] == \"png\":\n tmpImage = Image.open(\"./pics/\" + file)\n imgCv = cv2.imread(\"./pics/\" + file)\n self.Pics.update({info[0]: tmpImage})\n self.PicsCV.update({info[0]: imgCv})\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def Screenshot(self, region=None): # -> (im, (left, top))\n try_count = 3\n success = False\n while try_count > 0 and not success:\n try:\n try_count -= 1\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n hwnd = self.Handle\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n width = right - left\n height = bot - top\n self.RealRate = (width, height)\n width = int(width)\n height = int(height)\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\n saveDC.SelectObject(saveBitMap)\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n im = Image.frombuffer(\n \"RGB\",\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n im = im.resize((1440, 810))\n if region is not None:\n im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\n if result:\n success = True\n return im, (left, top)\n except Exception as e:\n print(\"截图时出现错误:\", repr(e))\n self.sleep(200)\n return None, (0, 0)\n\n def GetZoomRate(self):\n self.ScreenZoomRate = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100\n\n def LocateOnScreen(self, templateName, region, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n def ClickOnImage(self, templateName, region=None, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n result = LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n if result is not None:\n self.LeftClick(result)\n print(result)\n\n def LeftClick(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')\n time.sleep(0.1)\n pyautogui.moveTo(int(left + 1000), int(top + 550))\n\n '''win32gui.SetActiveWindow(self.Handle)\n lParam = win32api.MAKELONG(x, y)\n\n win32gui.PostMessage(self.Handle, WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, 
WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)'''\n\n def LeftClick2(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')" }, { "identifier": "get_move_type", "path": "douzero/env/move_detector.py", "snippet": "def get_move_type(move):\n move_size = len(move)\n move_dict = collections.Counter(move)\n\n if move_size == 0:\n return {'type': TYPE_0_PASS}\n\n if move_size == 1:\n return {'type': TYPE_1_SINGLE, 'rank': move[0]}\n\n if move_size == 2:\n if move[0] == move[1]:\n return {'type': TYPE_2_PAIR, 'rank': move[0]}\n elif move == [20, 30]: # Kings\n return {'type': TYPE_5_KING_BOMB}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 3:\n if len(move_dict) == 1:\n return {'type': TYPE_3_TRIPLE, 'rank': move[0]}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 4:\n if len(move_dict) == 1:\n return {'type': TYPE_4_BOMB, 'rank': move[0]}\n elif len(move_dict) == 2:\n if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:\n return {'type': TYPE_6_3_1, 'rank': move[1]}\n else:\n return {'type': TYPE_15_WRONG}\n else:\n return {'type': TYPE_15_WRONG}\n\n if is_continuous_seq(move):\n return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}\n\n if move_size == 5:\n if len(move_dict) == 2:\n return {'type': TYPE_7_3_2, 'rank': move[2]}\n else:\n return {'type': TYPE_15_WRONG}\n\n count_dict = collections.defaultdict(int)\n for c, n in move_dict.items():\n count_dict[n] += 1\n\n if move_size == 6:\n if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \\\n (count_dict.get(2) == 1 or count_dict.get(1) == 2):\n return {'type': TYPE_13_4_2, 'rank': move[2]}\n\n if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and\n (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):\n return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}\n\n mdkeys = sorted(move_dict.keys())\n if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):\n return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):\n return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n # Check Type 11 (serial 3+1) and Type 12 (serial 3+2)\n if count_dict.get(3, 0) >= MIN_TRIPLES:\n serial_3 = list()\n single = list()\n pair = list()\n\n for k, v in move_dict.items():\n if v == 3:\n serial_3.append(k)\n elif v == 1:\n single.append(k)\n elif v == 2:\n pair.append(k)\n else: # no other possibilities\n return {'type': TYPE_15_WRONG}\n\n serial_3.sort()\n if is_continuous_seq(serial_3):\n if len(serial_3) == len(single)+len(pair)*2:\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}\n if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:\n return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}\n\n if len(serial_3) == 4:\n if is_continuous_seq(serial_3[1:]):\n return {'type': 
TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}\n if is_continuous_seq(serial_3[:-1]):\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}\n\n return {'type': TYPE_15_WRONG}" }, { "identifier": "Ui_Form", "path": "MainWindow.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(677, 450)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(9)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n Form.setFont(font)\n Form.setWindowOpacity(0.8)\n self.WinRate = QtWidgets.QLabel(Form)\n self.WinRate.setGeometry(QtCore.QRect(320, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.WinRate.setFont(font)\n self.WinRate.setAlignment(QtCore.Qt.AlignCenter)\n self.WinRate.setObjectName(\"WinRate\")\n self.UserHandCards = QtWidgets.QLabel(Form)\n self.UserHandCards.setGeometry(QtCore.QRect(30, 330, 351, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.UserHandCards.setFont(font)\n self.UserHandCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.UserHandCards.setObjectName(\"UserHandCards\")\n self.ThreeLandlordCards = QtWidgets.QLabel(Form)\n self.ThreeLandlordCards.setGeometry(QtCore.QRect(30, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.ThreeLandlordCards.setFont(font)\n self.ThreeLandlordCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.ThreeLandlordCards.setObjectName(\"ThreeLandlordCards\")\n self.BidWinrate = QtWidgets.QLabel(Form)\n self.BidWinrate.setGeometry(QtCore.QRect(30, 220, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.BidWinrate.setFont(font)\n self.BidWinrate.setObjectName(\"BidWinrate\")\n self.PreWinrate = QtWidgets.QLabel(Form)\n self.PreWinrate.setGeometry(QtCore.QRect(30, 280, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PreWinrate.setFont(font)\n self.PreWinrate.setObjectName(\"PreWinrate\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(490, 320, 101, 41))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.LPlayedCard = QtWidgets.QLabel(Form)\n self.LPlayedCard.setGeometry(QtCore.QRect(170, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LPlayedCard.setFont(font)\n self.LPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.LPlayedCard.setObjectName(\"LPlayedCard\")\n self.splitter_2 = QtWidgets.QSplitter(Form)\n self.splitter_2.setGeometry(QtCore.QRect(20, 380, 621, 41))\n self.splitter_2.setOrientation(QtCore.Qt.Horizontal)\n self.splitter_2.setObjectName(\"splitter_2\")\n self.SingleButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n 
font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.SingleButton.setFont(font)\n self.SingleButton.setObjectName(\"SingleButton\")\n self.LoopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LoopButton.setFont(font)\n self.LoopButton.setObjectName(\"LoopButton\")\n self.StopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.StopButton.setFont(font)\n self.StopButton.setObjectName(\"StopButton\")\n self.tableWidget = QtWidgets.QTableWidget(Form)\n self.tableWidget.setGeometry(QtCore.QRect(20, 10, 611, 75))\n self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 75))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget.setFont(font)\n self.tableWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tableWidget.setStyleSheet(\"QTableWidget{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:#444444;\\n\"\n\"border:1px solid #242424;\\n\"\n\"alternate-background-color:#525252;\\n\"\n\"gridline-color:#242424;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:selected{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #484848,stop:1 #383838);\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:hover{\\n\"\n\"background:#5B5B5B;\\n\"\n\"}\\n\"\n\"QHeaderView::section{\\n\"\n\"text-align:center;\\n\"\n\"background:#5E5E5E;\\n\"\n\"padding:3px;\\n\"\n\"margin:0px;\\n\"\n\"color:#DCDCDC;\\n\"\n\"border:1px solid #242424;\\n\"\n\"border-left-width:0;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar:vertical{\\n\"\n\"background:#484848;\\n\"\n\"padding:0px;\\n\"\n\"border-radius:6px;\\n\"\n\"max-width:12px;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::handle:vertical{\\n\"\n\"background:#CCCCCC;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::handle:hover:vertical,QScrollBar::handle:pressed:vertical{\\n\"\n\"background:#A7A7A7;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-page:vertical{\\n\"\n\"background:444444;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::add-page:vertical{\\n\"\n\"background:5B5B5B;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::add-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\")\n self.tableWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.tableWidget.setMidLineWidth(-1)\n self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setAutoScroll(False)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)\n self.tableWidget.setObjectName(\"tableWidget\")\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n 
self.tableWidget.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(6, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(7, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(8, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(9, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(10, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(11, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(12, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(13, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(14, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 0, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 1, item)\n item = QtWidgets.QTableWidgetItem()\n 
item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 2, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 3, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 4, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 5, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 6, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 7, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 8, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 9, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 10, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 11, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 12, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 13, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 14, item)\n self.tableWidget.horizontalHeader().setVisible(True)\n self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(41)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.verticalHeader().setCascadingSectionResizes(False)\n self.tableWidget.verticalHeader().setDefaultSectionSize(40)\n self.tableWidget.verticalHeader().setHighlightSections(True)\n self.tableWidget.verticalHeader().setMinimumSectionSize(40)\n self.tableWidget.verticalHeader().setSortIndicatorShown(False)\n self.RPlayedCard = QtWidgets.QLabel(Form)\n self.RPlayedCard.setGeometry(QtCore.QRect(490, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.RPlayedCard.setFont(font)\n self.RPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.RPlayedCard.setObjectName(\"RPlayedCard\")\n self.PredictedCard = QtWidgets.QLabel(Form)\n self.PredictedCard.setGeometry(QtCore.QRect(320, 190, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PredictedCard.setFont(font)\n self.PredictedCard.setStyleSheet(\"\")\n self.PredictedCard.setFrameShape(QtWidgets.QFrame.Panel)\n self.PredictedCard.setLineWidth(1)\n self.PredictedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.PredictedCard.setObjectName(\"PredictedCard\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Hi\"))\n self.WinRate.setText(_translate(\"Form\", \"评分\"))\n self.UserHandCards.setText(_translate(\"Form\", \"手牌\"))\n self.ThreeLandlordCards.setText(_translate(\"Form\", \"地主牌\"))\n 
self.BidWinrate.setText(_translate(\"Form\", \"叫牌胜率:\"))\n self.PreWinrate.setText(_translate(\"Form\", \"局前胜率:\"))\n self.label.setText(_translate(\"Form\", \"游戏状态\"))\n self.LPlayedCard.setText(_translate(\"Form\", \"上家出牌区域\"))\n self.SingleButton.setText(_translate(\"Form\", \"单局\"))\n self.LoopButton.setText(_translate(\"Form\", \" 连续\"))\n self.StopButton.setText(_translate(\"Form\", \"停止\"))\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"Form\", \"大\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"Form\", \"小\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"Form\", \"2\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"Form\", \"A\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"Form\", \"K\"))\n item = self.tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"Form\", \"Q\"))\n item = self.tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"Form\", \"J\"))\n item = self.tableWidget.horizontalHeaderItem(7)\n item.setText(_translate(\"Form\", \"10\"))\n item = self.tableWidget.horizontalHeaderItem(8)\n item.setText(_translate(\"Form\", \"9\"))\n item = self.tableWidget.horizontalHeaderItem(9)\n item.setText(_translate(\"Form\", \"8\"))\n item = self.tableWidget.horizontalHeaderItem(10)\n item.setText(_translate(\"Form\", \"7\"))\n item = self.tableWidget.horizontalHeaderItem(11)\n item.setText(_translate(\"Form\", \"6\"))\n item = self.tableWidget.horizontalHeaderItem(12)\n item.setText(_translate(\"Form\", \"5\"))\n item = self.tableWidget.horizontalHeaderItem(13)\n item.setText(_translate(\"Form\", \"4\"))\n item = self.tableWidget.horizontalHeaderItem(14)\n item.setText(_translate(\"Form\", \"3\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n item = self.tableWidget.item(0, 0)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 1)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 2)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 3)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 4)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 5)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 6)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 7)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 8)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 9)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 10)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 11)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 12)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 13)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 14)\n item.setText(_translate(\"Form\", \"0\"))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n self.RPlayedCard.setText(_translate(\"Form\", \"下家出牌区域\"))\n self.PredictedCard.setText(_translate(\"Form\", \"AI出牌区域\"))" }, { "identifier": "GameEnv", "path": "douzero/env/game.py", "snippet": "class GameEnv(object):\n\n def __init__(self, players):\n\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n 
self.acting_player_position = None\n self.player_utility_dict = None\n\n self.players = players\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.num_wins = {'landlord': 0,\n 'farmer': 0}\n\n self.num_scores = {'landlord': 0,\n 'farmer': 0}\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 1,\n 'landlord_up': 1,\n 'landlord_down': 1}\n self.step_count = 0\n\n\n def card_play_init(self, card_play_data):\n self.info_sets['landlord'].player_hand_cards = \\\n card_play_data['landlord']\n self.info_sets['landlord_up'].player_hand_cards = \\\n card_play_data['landlord_up']\n self.info_sets['landlord_down'].player_hand_cards = \\\n card_play_data['landlord_down']\n self.three_landlord_cards = card_play_data['three_landlord_cards']\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n\n\n def game_done(self):\n if len(self.info_sets['landlord'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_down'].player_hand_cards) == 0:\n # if one of the three players discards his hand,\n # then game is over.\n self.compute_player_utility()\n self.update_num_wins_scores()\n\n self.game_over = True\n\n def compute_player_utility(self):\n\n if len(self.info_sets['landlord'].player_hand_cards) == 0:\n self.player_utility_dict = {'landlord': 2,\n 'farmer': -1}\n else:\n self.player_utility_dict = {'landlord': -2,\n 'farmer': 1}\n\n def update_num_wins_scores(self):\n for pos, utility in self.player_utility_dict.items():\n base_score = 2 if pos == 'landlord' else 1\n if utility > 0:\n self.num_wins[pos] += 1\n self.winner = pos\n self.num_scores[pos] += base_score * (2 ** self.bomb_num)\n else:\n self.num_scores[pos] -= base_score * (2 ** self.bomb_num)\n\n def get_winner(self):\n return self.winner\n\n def get_bomb_num(self):\n return self.bomb_num\n\n def step(self, position, action=[]):\n win_rate = 0\n if self.acting_player_position == position:\n action, actions_confidence = self.players[1].act(self.game_infoset)\n # 计算胜率\n win_rate = actions_confidence\n # win_rate = max(actions_confidence, -1)\n # win_rate = min(win_rate, 1)\n # win_rate = str(round(float((win_rate + 1) / 2), 4))\n\n if len(action) > 0:\n self.last_pid = self.acting_player_position\n\n if action in bombs:\n self.bomb_num += 1\n\n self.last_move_dict[\n self.acting_player_position] = action.copy()\n\n self.card_play_action_seq.append((position, action))\n self.update_acting_player_hand_cards(action)\n\n self.played_cards[self.acting_player_position] += action\n\n if self.acting_player_position == 'landlord' and \\\n len(action) > 0 and \\\n len(self.three_landlord_cards) > 0:\n for card in action:\n if len(self.three_landlord_cards) > 0:\n if card in self.three_landlord_cards:\n self.three_landlord_cards.remove(card)\n else:\n break\n self.game_done()\n if not self.game_over:\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n # 返回动作和胜率,只有玩家角色会接受返回值\n action_message = {\"action\": str(''.join([EnvCard2RealCard[c] for c in action])),\n \"win_rate\": 
str(round(float(win_rate), 4))}\n return action_message\n\n def get_last_move(self):\n last_move = []\n if len(self.card_play_action_seq) != 0:\n if len(self.card_play_action_seq[-1][1]) == 0:\n last_move = self.card_play_action_seq[-2][1]\n else:\n last_move = self.card_play_action_seq[-1][1]\n\n return last_move\n\n def get_last_two_moves(self):\n last_two_moves = [[], []]\n for card in self.card_play_action_seq[-2:]:\n last_two_moves.insert(0, card[1])\n last_two_moves = last_two_moves[:2]\n return last_two_moves\n\n def get_acting_player_position(self):\n if self.acting_player_position is None:\n self.acting_player_position = 'landlord'\n\n else:\n if self.acting_player_position == 'landlord':\n self.acting_player_position = 'landlord_down'\n\n elif self.acting_player_position == 'landlord_down':\n self.acting_player_position = 'landlord_up'\n\n else:\n self.acting_player_position = 'landlord'\n\n return self.acting_player_position\n\n def update_acting_player_hand_cards(self, action):\n if action != []:\n # 更新玩家手牌,删除对应的牌\n if self.acting_player_position == self.players[0]:\n for card in action:\n self.info_sets[self.acting_player_position].player_hand_cards.remove(card)\n # 更新另外两个玩家手牌,删除相同数量的牌\n else:\n del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)]\n self.info_sets[self.acting_player_position].player_hand_cards.sort()\n\n def get_legal_card_play_actions(self):\n mg = MovesGener(\n self.info_sets[self.acting_player_position].player_hand_cards)\n\n action_sequence = self.card_play_action_seq\n\n rival_move = []\n if len(action_sequence) != 0:\n if len(action_sequence[-1][1]) == 0:\n rival_move = action_sequence[-2][1]\n else:\n rival_move = action_sequence[-1][1]\n\n rival_type = md.get_move_type(rival_move)\n rival_move_type = rival_type['type']\n rival_move_len = rival_type.get('len', 1)\n moves = list()\n\n if rival_move_type == md.TYPE_0_PASS:\n moves = mg.gen_moves()\n\n elif rival_move_type == md.TYPE_1_SINGLE:\n all_moves = mg.gen_type_1_single()\n moves = ms.filter_type_1_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_2_PAIR:\n all_moves = mg.gen_type_2_pair()\n moves = ms.filter_type_2_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_3_TRIPLE:\n all_moves = mg.gen_type_3_triple()\n moves = ms.filter_type_3_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_4_BOMB:\n all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n moves = ms.filter_type_4_bomb(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_5_KING_BOMB:\n moves = []\n\n elif rival_move_type == md.TYPE_6_3_1:\n all_moves = mg.gen_type_6_3_1()\n moves = ms.filter_type_6_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_7_3_2:\n all_moves = mg.gen_type_7_3_2()\n moves = ms.filter_type_7_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_8_SERIAL_SINGLE:\n all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len)\n moves = ms.filter_type_8_serial_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_9_SERIAL_PAIR:\n all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len)\n moves = ms.filter_type_9_serial_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE:\n all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len)\n moves = ms.filter_type_10_serial_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_11_SERIAL_3_1:\n all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len)\n moves = 
ms.filter_type_11_serial_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_12_SERIAL_3_2:\n all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len)\n moves = ms.filter_type_12_serial_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_13_4_2:\n all_moves = mg.gen_type_13_4_2()\n moves = ms.filter_type_13_4_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_14_4_22:\n all_moves = mg.gen_type_14_4_22()\n moves = ms.filter_type_14_4_22(all_moves, rival_move)\n\n if rival_move_type not in [md.TYPE_0_PASS,\n md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]:\n moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n\n if len(rival_move) != 0: # rival_move is not 'pass'\n moves = moves + [[]]\n\n for m in moves:\n m.sort()\n\n return moves\n\n def reset(self):\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n self.acting_player_position = None\n self.player_utility_dict = None\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 0,\n 'landlord_up': 0,\n 'landlord_down': 0}\n self.step_count = 0\n\n def get_infoset(self):\n self.info_sets[\n self.acting_player_position].last_pid = self.last_pid\n\n self.info_sets[\n self.acting_player_position].legal_actions = \\\n self.get_legal_card_play_actions()\n\n self.info_sets[\n self.acting_player_position].bomb_num = self.bomb_num\n\n self.info_sets[\n self.acting_player_position].last_move = self.get_last_move()\n\n self.info_sets[\n self.acting_player_position].last_two_moves = self.get_last_two_moves()\n\n self.info_sets[\n self.acting_player_position].last_move_dict = self.last_move_dict\n\n self.info_sets[self.acting_player_position].num_cards_left_dict = \\\n {pos: len(self.info_sets[pos].player_hand_cards)\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n self.info_sets[self.acting_player_position].other_hand_cards = []\n\n '''\n 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n if pos != self.acting_player_position:\n self.info_sets[\n self.acting_player_position].other_hand_cards += \\\n self.info_sets[pos].player_hand_cards\n '''\n # 把出过的牌中三个子列表合成一个列表\n played_cards_tmp = []\n for i in list(self.played_cards.values()):\n played_cards_tmp.extend(i)\n # 出过的牌和玩家手上的牌\n played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards\n # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌\n for i in set(AllEnvCard):\n self.info_sets[\n self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i)))\n\n self.info_sets[self.acting_player_position].played_cards = \\\n self.played_cards\n self.info_sets[self.acting_player_position].three_landlord_cards = \\\n self.three_landlord_cards\n self.info_sets[self.acting_player_position].card_play_action_seq = \\\n self.card_play_action_seq\n\n self.info_sets[\n self.acting_player_position].all_handcards = \\\n {pos: self.info_sets[pos].player_hand_cards\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n # Custom bid 
info\n self.info_sets[self.acting_player_position].bid_info = bid_infos[self.acting_player_position]\n\n return deepcopy(self.info_sets[self.acting_player_position])" }, { "identifier": "DeepAgent", "path": "douzero/evaluation/deep_agent.py", "snippet": "class DeepAgent:\n\n def __init__(self, position, model_path):\n self.model_type = \"old\"\n if \"general\" in model_path:\n self.model_type = \"general\"\n elif \"resnet\" in model_path:\n self.model_type = \"resnet\"\n self.model = _load_model(position, model_path, self.model_type)\n\n def act(self, infoset):\n obs = get_obs(infoset, model_type=self.model_type)\n z_batch = torch.from_numpy(obs['z_batch']).float()\n x_batch = torch.from_numpy(obs['x_batch']).float()\n if torch.cuda.is_available():\n z_batch, x_batch = z_batch.cuda(), x_batch.cuda()\n y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values']\n y_pred = y_pred.detach().cpu().numpy()\n\n best_action_index = np.argmax(y_pred, axis=0)[0]\n best_action = infoset.legal_actions[best_action_index]\n best_action_confidence = y_pred[best_action_index]\n return best_action, best_action_confidence" } ]
import GameHelper as gh
import os
import sys
import time
import threading
import pyautogui
import win32gui
import multiprocessing as mp
import DetermineColor as DC
import cv2
import numpy as np
import traceback
import BidModel
import LandlordModel
import FarmerModel
from GameHelper import GameHelper
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from collections import defaultdict
from douzero.env.move_detector import get_move_type
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import QTime, QEventLoop, Qt
from MainWindow import Ui_Form
from douzero.env.game import GameEnv
from douzero.evaluation.deep_agent import DeepAgent
12150
# -*- coding: utf-8 -*-
# Created by: Raf
# Modify by: Vincentzyx
EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T',
                    11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'}
RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10,
                    'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30}
AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
              8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11,
              12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3']
helper = GameHelper()
# -*- coding: utf-8 -*-
# Created by: Raf
# Modify by: Vincentzyx
EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T',
                    11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'}
RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10,
                    'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30}
AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
              8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11,
              12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3']
helper = GameHelper()
class MyPyQT_Form(QtWidgets.QWidget, Ui_Form):
2
2023-12-01 04:04:30+00:00
16k
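The row above ends here. As a reading aid, the following minimal sketch (not part of the dataset) shows how such a row could be reassembled into a left-to-right completion prompt and scored against its next_line field. It assumes the row has been parsed into a Python dict keyed by the field names shown in the schema (context, import_statement, cropped_code, next_line); the prompt layout and the helper names build_prompt and next_line_exact_match are illustrative assumptions, not something the dataset prescribes. For this particular row, the gold continuation is the MyPyQT_Form class header shown above, and gold_snippet_index 2 corresponds to the Ui_Form context snippet it depends on.

# Illustrative sketch only: field names follow the dataset schema; the prompt
# layout and the exact-match scoring rule are assumptions, not prescribed data.
from typing import Dict, List


def build_prompt(record: Dict) -> str:
    # Concatenate each retrieved context snippet (tagged with its source path),
    # then the import block, then the cropped file body, into one prompt string.
    context_blocks: List[str] = [
        "# Path: {}\n{}".format(c["path"], c["snippet"]) for c in record["context"]
    ]
    return "\n\n".join(context_blocks + [record["import_statement"], record["cropped_code"]])


def next_line_exact_match(predicted_line: str, record: Dict) -> bool:
    # A model is credited when its first generated line matches next_line exactly
    # (whitespace-stripped), e.g. the MyPyQT_Form class header in the row above.
    return predicted_line.strip() == record["next_line"].strip()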
yongzhuo/MacroGPT-Pretrain
macro_gpt/ft_gpt/train.pt.py
[ { "identifier": "CUDA_VISIBLE_DEVICES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CUDA_VISIBLE_DEVICES = \"0\"" }, { "identifier": "USE_TORCH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_TORCH = \"1\"" }, { "identifier": "CPU_NUMS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CPU_NUMS = \"9\"" }, { "identifier": "LlamaForCausalLM", "path": "macro_gpt/models/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n # logits = self.lm_head(hidden_states)\n logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype))\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, 
beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "macro_gpt/models/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n pad_token (`str` or `tokenizers.AddedToken`, *optional*):\n A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by\n attention mechanisms or loss computation.\n sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):\n Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for\n SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,\n to set:\n\n - `enable_sampling`: Enable subword regularization.\n - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.\n\n - `nbest_size = {0,1}`: No sampling is performed.\n - `nbest_size > 1`: samples from the nbest_size results.\n - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)\n using forward-filtering-and-backward-sampling algorithm.\n\n - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for\n BPE-dropout.\n\n add_bos_token (`bool`, *optional*, defaults to `True`):\n Whether or not to add an `bos_token` at the start of sequences.\n add_eos_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add an `eos_token` at the end of sequences.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like\n extra spaces.\n use_default_system_prompt (`bool`, *optional*, defaults to `True`):\n Whether or not the default system prompt for Llama should be used.\n spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to add spaces between special tokens.\n legacy (`bool`, *optional*):\n Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622\n and #25224 which includes fixes to properly handle tokens that appear after special tokens. 
A simple\n example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n use_default_system_prompt=True,\n spaces_between_special_tokens=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This is\"\n \" expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you.\"\n \" If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it\"\n \" means, and thouroughly read the reason why this was added as explained in\"\n \" https://github.com/huggingface/transformers/pull/24565\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.use_default_system_prompt = use_default_system_prompt\n self.sp_model = self.get_spm_processor(kwargs.pop(\"from_slow\", False))\n\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n use_default_system_prompt=use_default_system_prompt,\n spaces_between_special_tokens=spaces_between_special_tokens,\n legacy=legacy,\n **kwargs,\n )\n\n @property\n def unk_token_length(self):\n return len(self.sp_model.encode(str(self.unk_token)))\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor\n def get_spm_processor(self, from_slow=False):\n tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n if self.legacy or from_slow: # no dependency on protobuf\n tokenizer.Load(self.vocab_file)\n return tokenizer\n\n with open(self.vocab_file, \"rb\") as f:\n sp_model = f.read()\n model_pb2 = import_protobuf(f\"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)\")\n model = model_pb2.ModelProto.FromString(sp_model)\n normalizer_spec = model_pb2.NormalizerSpec()\n normalizer_spec.add_dummy_prefix = False\n model.normalizer_spec.MergeFrom(normalizer_spec)\n sp_model = model.SerializeToString()\n tokenizer.LoadFromSerializedProto(sp_model)\n return tokenizer\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", add_special_tokens=False, **kwargs) -> List[str]:\n \"\"\"\n Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the\n first token is special.\n \"\"\"\n if self.legacy or len(text) == 0:\n return super().tokenize(text, **kwargs)\n\n tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \"), **kwargs)\n\n if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:\n tokens = tokens[1:]\n return tokens\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any\n SPIECE_UNDERLINE. For example: `self.sp_model.encode(f\"{SPIECE_UNDERLINE}Hey\", out_type = str)` will give\n `['H', 'e', 'y']` instead of `['▁He', 'y']`. 
Thus we always encode `f\"{unk_token}text\"` and strip the\n `unk_token`. Here is an example with `unk_token = \"<unk>\"` and `unk_token_length = 4`.\n `self.tokenizer.sp_model.encode(\"<unk> Hey\", out_type = str)[4:]`.\n \"\"\"\n tokens = self.sp_model.encode(text, out_type=str)\n if self.legacy or not text.startswith((SPIECE_UNDERLINE, \" \")):\n return tokens\n\n # 1. Encode string + prefix ex: \"<unk> Hey\"\n tokens = self.sp_model.encode(self.unk_token + text, out_type=str)\n # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']\n return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n # since we manually add the prefix space, we have to remove it when decoding\n if tokens[0].startswith(SPIECE_UNDERLINE):\n tokens[0] = tokens[0][1:]\n\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0 and self.legacy:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n @property\n def default_chat_template(self):\n \"\"\"\n LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.\n Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict\n user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering\n rather than needing special tokens. The system message is partly 'embedded' in the first user message, which\n results in an unusual token ordering when it is present. 
This template should definitely be changed if you wish\n to fine-tune a model with more flexible role ordering!\n\n The output should look something like:\n\n <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos> <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n \"\"\"\n\n template = (\n \"{% if messages[0]['role'] == 'system' %}\"\n \"{% set loop_messages = messages[1:] %}\" # Extract system message if it's present\n \"{% set system_message = messages[0]['content'] %}\"\n \"{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}\"\n \"{% set loop_messages = messages %}\" # Or use the default system message if the flag is set\n \"{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}\"\n \"{% else %}\"\n \"{% set loop_messages = messages %}\"\n \"{% set system_message = false %}\"\n \"{% endif %}\"\n \"{% for message in loop_messages %}\" # Loop over all non-system messages\n \"{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\"\n \"{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\"\n \"{% endif %}\"\n \"{% if loop.index0 == 0 and system_message != false %}\" # Embed system message in first message\n \"{% set content = '<<SYS>>\\\\n' + system_message + '\\\\n<</SYS>>\\\\n\\\\n' + message['content'] %}\"\n \"{% else %}\"\n \"{% set content = message['content'] %}\"\n \"{% endif %}\"\n \"{% if message['role'] == 'user' %}\" # After all of that, handle messages/roles in a fairly normal way\n \"{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}\"\n \"{% elif message['role'] == 'system' %}\"\n \"{{ '<<SYS>>\\\\n' + content.strip() + '\\\\n<</SYS>>\\\\n\\\\n' }}\"\n \"{% elif message['role'] == 'assistant' %}\"\n \"{{ ' ' + content.strip() + ' ' + eos_token }}\"\n \"{% endif %}\"\n \"{% endfor %}\"\n )\n template = template.replace(\"USE_DEFAULT_PROMPT\", \"true\" if self.use_default_system_prompt else \"false\")\n default_message = DEFAULT_SYSTEM_PROMPT.replace(\"\\n\", \"\\\\n\").replace(\"'\", \"\\\\'\")\n template = template.replace(\"DEFAULT_SYSTEM_MESSAGE\", default_message)\n\n return template" }, { "identifier": "LlamaConfig", "path": "macro_gpt/models/llama/modeling_llama.py", "snippet": "def is_flash_attn_available():\n def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:\ndef _get_unpad_data(padding_mask):\ndef _make_causal_mask(\n input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0\n):\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n def __init__(self, hidden_size, eps=1e-6):\n def forward(self, hidden_states):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def forward(self, x, seq_len=None):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\ndef rotate_half(x):\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids):\n def __init__(self, config):\n def forward(self, x):\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n def __init__(self, config: LlamaConfig):\n def _init_rope(self):\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: 
int):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def _flash_attention_forward(\n self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None\n ):\n def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):\n def __init__(self, config: LlamaConfig):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n def _init_weights(self, module):\n def _set_gradient_checkpointing(self, module, value=False):\n def __init__(self, config: LlamaConfig):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def set_decoder(self, decoder):\n def get_decoder(self):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n def _reorder_cache(past_key_values, beam_idx):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: 
Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n_CONFIG_FOR_DOC = \"LlamaConfig\"\nLLAMA_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`LlamaConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nLLAMA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\nclass LlamaRMSNorm(nn.Module):\nclass LlamaRotaryEmbedding(nn.Module):\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaMLP(nn.Module):\nclass LlamaAttention(nn.Module):\nclass LlamaFlashAttention2(LlamaAttention):\nclass LlamaDecoderLayer(nn.Module):\nclass LlamaPreTrainedModel(PreTrainedModel):\nclass LlamaModel(LlamaPreTrainedModel):\nclass LlamaForCausalLM(LlamaPreTrainedModel):\nclass LlamaForSequenceClassification(LlamaPreTrainedModel):" }, { "identifier": "PATH_MODEL_PRETRAIN", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_MODEL_PRETRAIN = \"\"" }, { "identifier": "DATA_PATH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "DATA_PATH = \"../datasets/tigerbot-train-00001-of-00097.json\"" }, { "identifier": "MODEL_SAVE_DIR", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MODEL_SAVE_DIR = \"model_macrogpt_1b3_float32\"" }, { "identifier": "REPO_ID", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "REPO_ID = \"Macropodus/macrogpt-tokenizer\"" }, { "identifier": "MICRO_BATCH_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MICRO_BATCH_SIZE = 4 # default=4 # this could actually be 5 but i like powers of 2" }, { "identifier": "BATCH_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "BATCH_SIZE = 128" }, { "identifier": "GRADIENT_ACCUMULATION_STEPS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE" }, { "identifier": "LEARNING_RATE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LEARNING_RATE = 3e-4 # default=3e-4 # the Karpathy constant" }, { "identifier": "EPOCHS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "EPOCHS = 1 # default=3 # we don't always need 3 tbh" }, { "identifier": "SAVE_STEPS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "SAVE_STEPS = 384" }, { "identifier": "VAL_SET_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "VAL_SET_SIZE = 0" }, { "identifier": "TARGET_MODULES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "TARGET_MODULES = [\"query_key_value\"]" }, { "identifier": "IS_PARALLELIZABLE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "IS_PARALLELIZABLE = False" }, { "identifier": "MODEL_PARALLEL", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MODEL_PARALLEL = False" }, { "identifier": "USE_CACHE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_CACHE = False" }, { "identifier": "MAX_LENGTH_Q", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_Q = 1024 - 2 # default=128 - 2" }, { "identifier": "MAX_LENGTH_A", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_A = 1024 - 2 # default=128 - 2" }, { "identifier": "MAX_LENGTH_QA", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_QA = MAX_LENGTH_Q + MAX_LENGTH_A + 4" }, { "identifier": "LORA_DROPOUT", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_DROPOUT = 0.05" }, { "identifier": 
"LORA_ALPHA", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_ALPHA = 16" }, { "identifier": "LORA_R", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_R = 8" }, { "identifier": "PATH_MODEL_CONFIG", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_MODEL_CONFIG = \"config_macrogpt_1b3_float32.json\" or MODEL_SAVE_DIR" }, { "identifier": "PATH_TOKENIZER_PRETRAIN", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_TOKENIZER_PRETRAIN = REPO_ID or \"./macrogpt.model\"" } ]
import random import copy import sys import os import bitsandbytes as bnb import torch.nn as nn import transformers import torch from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig) from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.modeling_utils import unwrap_model from tensorboardX import SummaryWriter from datasets import load_dataset from macro_gpt.models.llama.modeling_llama import LlamaForCausalLM as LLMForCausalLM from macro_gpt.models.llama.tokenization_llama import LlamaTokenizer as LLMTokenizer from macro_gpt.models.llama.modeling_llama import LlamaConfig as LLMConfig from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LORA_DROPOUT, LORA_ALPHA, LORA_R from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_CONFIG, PATH_TOKENIZER_PRETRAIN
13833
# ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } # model = GPT2LMHeadModel.from_pretrained(PATH_MODEL_PRETRAIN) llm_config = LLMConfig.from_json_file(PATH_MODEL_CONFIG) model = LLMForCausalLM(llm_config) model.init_weights() model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL model.config.use_cache = USE_CACHE # model.clip_grad_norm_ = 1.0 # model = model.half().cuda() ## norm, lm_head层为fp32 # prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", # use_gradient_checkpointing=True, layer_norm_names=["post_attention_layernorm", # "input_layernorm", # "norm", # ]) model = model.cuda() print_rank_0_named_parameters(model) tensorboardx_witer = SummaryWriter(logdir=MODEL_SAVE_DIR) # files = dfs_file(DATA_PATH) # files = [files for file in files if "data_merge.0" in file or "data_merge.1" in file] ### 只有一个train的情况 # data = load_dataset("json", data_files={"train": files}) data = load_dataset("json", data_files=DATA_PATH) # data = load_dataset("json", data_dir=DATA_PATH) # train_val = data["train"].train_test_split(test_size=min(VAL_SET_SIZE, # int(len(data["train"])/10000)), shuffle=True, seed=42) # VAL_SET_SIZE = max(min(VAL_SET_SIZE, int(len(data["train"])/10000)), 1) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=VAL_SET_SIZE, shuffle=True, seed=42) # train_data = train_val["train"].shuffle().map(generate_prompt) # val_data = train_val["test"].shuffle().map(generate_prompt) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=1024, shuffle=True, seed=42) # train_data = train_val["test"].shuffle().map(generate_prompt) # val_data = None generate_prompt(data["train"][0], is_logger=True) train_data = data["train"].shuffle().map(generate_prompt) val_data = None class CustomTrainer(transformers.Trainer): def compute_loss(self, model, inputs, return_outputs=False): inputs = {k: v.cuda() for k, v in inputs.items()} outputs = model(**inputs) # if contain labels, will calculate loss if local_rank_is_0: logs = {} tr_loss_scalar = self._nested_gather(outputs.loss.detach()).mean().item() logs["loss"] = round(tr_loss_scalar, 4) logs["lr"] = self.lr_scheduler.get_last_lr()[0] step = self.state.global_step for k, v in logs.items(): tensorboardx_witer.add_scalar(k, v, step) self.log(logs) if self.label_smoother is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. 
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] # if llm_config.torch_dtype == "float16": # loss = loss.half() loss = loss.half() return (loss, outputs) if return_outputs else loss trainer = CustomTrainer( # data_collator=transformers.DataCollatorForSeq2Seq( # tokenizer, pad_to_multiple_of=8, # return_tensors="pt", padding=True # ), data_collator=data_collator, train_dataset=train_data, eval_dataset=val_data, model=model, args=transformers.TrainingArguments( gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
# !/usr/bin/python # -*- coding: utf-8 -*- # @time : 2023/3/5 21:04 # @author : Mo # @function: macro-gpt path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) sys.path.append(path_root) os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072" os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES os.environ["USE_TORCH"] = USE_TORCH os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1 os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1 os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1 os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1 os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1 def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"): """ 仅保存 有梯度 的 模型参数(推荐使用) """ if not os.path.exists(model_save_dir): os.makedirs(model_save_dir) # save config if config: config.save_pretrained(model_save_dir) # config.to_dict() # save model path_model = os.path.join(model_save_dir, model_name) # grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters() # if v.requires_grad == True} grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()} torch.save(grad_params_dict, path_model) print_rank_0("******model_save_path is {}******".format(path_model)) def print_rank_0_named_parameters(model, use_print_rank_0_data=False): """ 打印模型训练参数/数据类型信息 """ trainable_params = 0 all_param = 0 for name, param in model.named_parameters(): if use_print_rank_0_data: print_rank_0((name, param.data.dtype, param.requires_grad, param.data)) else: print_rank_0((name, param.data.dtype, param.requires_grad)) num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print_rank_0(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}") def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]): r""" This method wrapps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers` """ # 不要使用 model.half(), 这样会先截取精度再训练了, 最初data就要保持half for name, param in model.named_parameters(): # freeze base model's layers # cast layer norm in fp32 for stability for 8bit models if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names): param.data = param.data.to(torch.float32) elif output_embedding_layer_name in name: # lm_head也需要是tf.float32(最后一层) param.data = param.data.to(torch.float32) else: param.data = param.data.to(torch.half) if use_gradient_checkpointing: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable() return model def generate_prompt(data_point, is_logger=False): # sorry about the formatting disaster gotta move fast # text_1 = f"指令:\n{data_point.get('instruction', '')}\n问:\n{data_point.get('input', '')}\n答:\n" \ # if data_point.get('input', '') else f"指令:\n{data_point.get('instruction', '')}\n答:\n" # text_2 = f"{data_point.get('output', '')}" text_a = data_point.get("a", "") prompt_str_1 = text_a # end with gMASK, <sop> x = tokenizer.encode(prompt_str_1) if len(x) > MAX_LENGTH_QA - 2: x = x[:MAX_LENGTH_QA - 2] if not x: x = [ID_PAD, ID_EOS] if x and x[-1] != ID_EOS: x += [ID_EOS] out = {"input_ids": x, "labels": []} if is_logger: print_rank_0(prompt_str_1) print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### 如果 padding-right, 也mask掉 # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = torch.stack(batch_input_ids) batch_labels = 
torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ 递归获取某个目录下的所有文件(所有层, 包括子目录) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. ["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # 分别代表根目录、文件夹、文件 for file in files: # 遍历文件 file_path = os.path.join(root, file) # 获取文件绝对路径 path_files.append(file_path) # 将文件路径添加进列表 files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ 只打印 0 号GPU的 """ # if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。 # print(*args) print(*args) def local_rank_is_0(): """ 判断是哪台机子的 """ # flag = False # if torch.distributed.get_rank() == 0: # flag = True # return flag return True # import torch.distributed as dist # dist.init_process_group(backend='nccl') # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_TOKENIZER_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token # tokenizer.padding_side = "left" # Allow batched inference tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } # model = GPT2LMHeadModel.from_pretrained(PATH_MODEL_PRETRAIN) llm_config = LLMConfig.from_json_file(PATH_MODEL_CONFIG) model = LLMForCausalLM(llm_config) model.init_weights() model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL model.config.use_cache = USE_CACHE # model.clip_grad_norm_ = 1.0 # model = model.half().cuda() ## norm, lm_head层为fp32 # prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", # use_gradient_checkpointing=True, layer_norm_names=["post_attention_layernorm", # "input_layernorm", # "norm", # ]) model = model.cuda() print_rank_0_named_parameters(model) tensorboardx_witer = SummaryWriter(logdir=MODEL_SAVE_DIR) # files = dfs_file(DATA_PATH) # files = [files for file in files if "data_merge.0" in file or "data_merge.1" in file] ### 只有一个train的情况 # data = load_dataset("json", data_files={"train": files}) data = load_dataset("json", data_files=DATA_PATH) # data = load_dataset("json", data_dir=DATA_PATH) # train_val = data["train"].train_test_split(test_size=min(VAL_SET_SIZE, # int(len(data["train"])/10000)), shuffle=True, seed=42) # VAL_SET_SIZE = max(min(VAL_SET_SIZE, int(len(data["train"])/10000)), 1) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=VAL_SET_SIZE, shuffle=True, seed=42) # train_data = train_val["train"].shuffle().map(generate_prompt) # val_data = train_val["test"].shuffle().map(generate_prompt) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=1024, shuffle=True, seed=42) # train_data = train_val["test"].shuffle().map(generate_prompt) # val_data = None generate_prompt(data["train"][0], is_logger=True) train_data = data["train"].shuffle().map(generate_prompt) val_data = None class CustomTrainer(transformers.Trainer): def compute_loss(self, model, 
inputs, return_outputs=False): inputs = {k: v.cuda() for k, v in inputs.items()} outputs = model(**inputs) # if contain labels, will calculate loss if local_rank_is_0: logs = {} tr_loss_scalar = self._nested_gather(outputs.loss.detach()).mean().item() logs["loss"] = round(tr_loss_scalar, 4) logs["lr"] = self.lr_scheduler.get_last_lr()[0] step = self.state.global_step for k, v in logs.items(): tensorboardx_witer.add_scalar(k, v, step) self.log(logs) if self.label_smoother is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] # if llm_config.torch_dtype == "float16": # loss = loss.half() loss = loss.half() return (loss, outputs) if return_outputs else loss trainer = CustomTrainer( # data_collator=transformers.DataCollatorForSeq2Seq( # tokenizer, pad_to_multiple_of=8, # return_tensors="pt", padding=True # ), data_collator=data_collator, train_dataset=train_data, eval_dataset=val_data, model=model, args=transformers.TrainingArguments( gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
per_device_train_batch_size=MICRO_BATCH_SIZE,
10
2023-11-30 12:39:19+00:00
16k
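The all_code field of the record above centers on a hand-written data_collator that right-pads input_ids with ID_PAD and labels with -100 so that padded positions are ignored by the cross-entropy loss, capping the batch length at MAX_LENGTH_QA. Below is a minimal, self-contained sketch of that padding scheme; the constant values mirror the record's config (ID_PAD = 0, MAX_LENGTH_QA = 1022 + 1022 + 4 = 2048) and the function name collate_batch is illustrative, not from the source.

# Sketch of the right-padding scheme used by the record's data_collator.
import torch

ID_PAD = 0
MAX_LENGTH_QA = 2048  # MAX_LENGTH_Q + MAX_LENGTH_A + 4 in the record's config

def collate_batch(batch):
    # Longest (input + label) sequence in the batch, capped at the context size.
    len_max = min(MAX_LENGTH_QA,
                  max(len(b["input_ids"]) + len(b["labels"]) + 1 for b in batch))
    input_ids, labels = [], []
    for b in batch:
        x, y = b["input_ids"], b["labels"]
        pad = len_max - len(x) - len(y)  # generate_prompt truncates, so pad >= 0
        # Inputs are padded with ID_PAD, labels with -100 (ignored by the loss).
        input_ids.append(torch.tensor(x + y + [ID_PAD] * pad, dtype=torch.long))
        labels.append(torch.tensor(x + y + [-100] * pad, dtype=torch.long))
    return {"input_ids": torch.stack(input_ids), "labels": torch.stack(labels)}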
owkin/fedeca
fedeca/scripts/dp_logreg.py
[ { "identifier": "TorchDPFedAvgAlgo", "path": "fedeca/algorithms/torch_dp_fed_avg_algo.py", "snippet": "class TorchDPFedAvgAlgo(TorchFedAvgAlgo):\n \"\"\"To be inherited.\n\n Wraps the necessary operation so a torch model can be trained in the Federated\n Averaging strategy using DP.\n \"\"\"\n\n def __init__(\n self,\n model: torch.nn.Module,\n criterion: torch.nn.modules.loss._Loss,\n optimizer: torch.optim.Optimizer,\n dataset: torch.utils.data.Dataset,\n num_updates: int,\n batch_size: int,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n with_batch_norm_parameters: bool = False,\n seed: Optional[int] = None,\n use_gpu: bool = True,\n dp_target_epsilon: float = None,\n dp_target_delta: float = None,\n dp_max_grad_norm: float = None,\n num_rounds: int = None,\n *args,\n **kwargs,\n ):\n \"\"\"Instantiate a TorchDPFedAvgAlgo.\n\n Parameters\n ----------\n model : torch.nn.modules.module.Module\n A torch model.\n criterion : torch.nn.modules.loss._Loss\n A torch criterion (loss).\n optimizer : torch.optim.Optimizer\n A torch optimizer linked to the model.\n dataset : torch.utils.data.Dataset\n Refer to the doc of the parent class.\n This behavior can be changed by re-writing the `_local_train` or\n `predict` methods.\n num_updates : int\n The number of updates to perform. Note that here we do not use\n NpIndexGenerators.\n batch_size : int\n The batch-size to target in expectation (Poisson sampling).\n scheduler : torch.optim.lr_scheduler._LRScheduler, Optional\n A torch scheduler that will be called at every batch. If None, no\n scheduler will be used. Defaults to None.\n with_batch_norm_parameters : bool\n Whether to include the batch norm layer parameters in the federated\n average strategy. Defaults to False.\n seed : typing.Optional[int]\n Seed set at the algo initialization on each organization.\n Defaults to None.\n use_gpu : bool\n Whether to use the GPUs if they are available. Defaults to True.\n dp_target_epsilon : float\n The target epsilon for (epsilon, delta)-differential private guarantee.\n Defaults to None.\n dp_target_delta : float\n The target delta for (epsilon, delta)-differential private guarantee.\n Defaults to None.\n dp_max_grad_norm : float\n The maximum L2 norm of per-sample gradients; used to enforce\n differential privacy. Defaults to None.\n num_rounds : int\n The number of rounds used to train the algo. 
Although this is very\n peculiar for a substra Algorithm to need access to this quantity,\n Opacus needs the number of rounds and updates used to compute the\n total number of training steps in order to compute a noise level\n respecting user constraints.\n \"\"\"\n super().__init__(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n dataset=dataset,\n scheduler=scheduler,\n seed=seed,\n use_gpu=use_gpu,\n index_generator=None,\n *args,\n **kwargs,\n )\n self._with_batch_norm_parameters = with_batch_norm_parameters\n self.dp_target_delta = dp_target_delta\n self.dp_target_epsilon = dp_target_epsilon\n self.dp_max_grad_norm = dp_max_grad_norm\n self.num_rounds = num_rounds\n\n self._apply_dp = (\n (self.dp_target_epsilon is not None)\n and (self.dp_max_grad_norm is not None)\n and (self.dp_target_delta is not None)\n )\n\n if not (self._apply_dp):\n raise ValueError(\n \"Do not use this Algo without DP you risk running into batch\"\n \" sampling issues, instead use TorchFedAvgAlgo with NpIndexGenerator\"\n )\n if self.num_rounds is None:\n raise ValueError(\n \"if you want to perform DP-training you need to prespecify the\"\n \" number of rounds in advance.\"\n )\n self.num_updates = num_updates\n self.batch_size = batch_size\n\n self.num_total_steps = self.num_updates * self.num_rounds\n\n def _local_train(\n self,\n train_dataset: torch.utils.data.Dataset,\n ):\n \"\"\"Contain the local training loop.\n\n Train the model on ``num_updates`` minibatches for the torch dataset.\n\n Parameters\n ----------\n train_dataset : torch.utils.data.Dataset\n train_dataset build from the x and y returned by the opener.\n \"\"\"\n # Create torch dataloader it is important that it has a self.batch_size\n # batch size as len(train_data_loader) will be called by opacus\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=self.batch_size\n )\n if not hasattr(self, \"size_train_dataset\"):\n self.size_train_dataset = len(train_dataset)\n\n if not hasattr(\n self, \"accountant\"\n ): # if the attribute is not already there, need to instantiate the Engine\n # Important to use RDP to be able to use high epsilons\n # see https://github.com/pytorch/opacus/issues/604\n privacy_engine = PrivacyEngine(accountant=\"rdp\")\n\n if not hasattr(self, \"sample_rate\"):\n self.sample_rate = self.batch_size / len(train_dataset)\n else:\n assert np.allclose(\n self.sample_rate, self.batch_size / self.size_train_dataset\n ), \"The length of the dataset has changed\"\n\n # We will need it later\n self.noise_multiplier = get_noise_multiplier(\n target_epsilon=self.dp_target_epsilon,\n target_delta=self.dp_target_delta,\n sample_rate=self.sample_rate,\n steps=self.num_total_steps,\n accountant=privacy_engine.accountant.mechanism(),\n )\n\n (\n self._model,\n self._optimizer,\n train_data_loader,\n ) = privacy_engine.make_private(\n module=self._model,\n optimizer=self._optimizer,\n data_loader=train_data_loader,\n noise_multiplier=self.noise_multiplier,\n max_grad_norm=self.dp_max_grad_norm,\n poisson_sampling=True,\n )\n self.accountant = privacy_engine.accountant\n\n else:\n train_data_loader = DPDataLoader.from_data_loader(train_data_loader)\n\n for x_batch, y_batch in train_data_loader:\n x_batch = x_batch.to(self._device)\n y_batch = y_batch.to(self._device)\n # As batch-size is variable sometimes the batch is empty\n if x_batch.nelement() == 0:\n continue\n # Forward pass\n y_pred = self._model(x_batch)\n\n # Compute Loss\n loss = self._criterion(y_pred, y_batch)\n\n 
self._optimizer.zero_grad()\n loss.backward()\n\n self._optimizer.step()\n\n if self._scheduler is not None:\n self._scheduler.step()\n\n @remote_data\n def train(\n self,\n datasamples: Any,\n shared_state: Optional[FedAvgAveragedState] = None,\n ) -> FedAvgSharedState:\n \"\"\"Train method of the DP federated averaging strategy.\n\n This method is essentially the same as the regular federated average\n algorithm but without an index generator.\n\n Parameters\n ----------\n datasamples : typing.Any\n Input data returned by the ``get_data`` method from the opener.\n shared_state : FedAvgAveragedState, Optional\n Dictionary containing torch parameters that will be set to the model.\n Defaults to None.\n\n Returns\n -------\n FedAvgSharedState\n Weight update (delta between fine-tuned weights and previous weights).\n \"\"\"\n # Note that we don't simply inherit from the method from FedAvgTorchAlgo\n # because it assumes the existence of the NpIndexGenerator\n\n # Create torch dataset\n train_dataset = self._dataset(datasamples, is_inference=False)\n\n if shared_state is not None:\n # The shared states is the average of the model parameter updates\n # for all organizations\n # Hence we need to add it to the previous local state parameters\n parameter_updates = [\n torch.from_numpy(x).to(self._device)\n for x in shared_state.avg_parameters_update\n ]\n weight_manager.increment_parameters(\n model=self._model,\n updates=parameter_updates,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n old_parameters = weight_manager.get_parameters(\n model=self._model,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n # Train mode for torch model\n self._model.train()\n\n # Train the model\n self._local_train(train_dataset)\n\n self._model.eval()\n\n parameters_update = weight_manager.subtract_parameters(\n parameters=weight_manager.get_parameters(\n model=self._model,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n ),\n parameters_to_subtract=old_parameters,\n )\n\n # Re set to the previous state\n weight_manager.set_parameters(\n model=self._model,\n parameters=old_parameters,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n return FedAvgSharedState(\n n_samples=len(train_dataset),\n parameters_update=[p.cpu().detach().numpy() for p in parameters_update],\n )\n\n def _local_predict(\n self,\n predict_dataset: torch.utils.data.Dataset,\n predictions_path,\n return_predictions=False,\n ):\n \"\"\"Predict.\n\n Parameters\n ----------\n predict_dataset : torch.utils.data.Dataset\n Predict dataset built from the `x` returned by the opener.\n\n Important\n ---------\n The responsibility is on the user to save the computed predictions.\n Substrafl provides the `TorchAlgo._save_predictions` method for this\n purpose.\n The user can load those predictions from a metric file with the command:\n `y_pred = np.load(inputs['predictions'])`.\n\n Raises\n ------\n BatchSizeNotFoundError\n No default batch size has been found to perform local prediction.\n Please override the predict function of your algorithm.\n \"\"\"\n # Note that we don't simply inherit from the method from FedAvgTorchAlgo\n # because it assumes the existence of the NpIndexGenerator\n\n predict_loader = torch.utils.data.DataLoader(\n predict_dataset, batch_size=self.batch_size, shuffle=False, drop_last=False\n )\n\n self._model.eval()\n\n predictions = []\n with torch.no_grad():\n for x in predict_loader:\n x = x.to(self._device)\n predictions.append(self._model(x))\n 
predictions = torch.cat(predictions, 0)\n predictions = predictions.cpu().detach()\n if return_predictions:\n return predictions\n else:\n self._save_predictions(predictions, predictions_path)\n\n def _get_state_to_save(self) -> dict:\n \"\"\"Get all attibutes to save and pass on to next state.\n\n Returns\n -------\n dict\n The dict with all quantities to persist.\n \"\"\"\n checkpoint = super()._get_state_to_save()\n\n list_attrs_to_save = [\n \"dp_max_grad_norm\",\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"num_rounds\",\n \"num_updates\",\n \"num_total_steps\",\n \"batch_size\",\n ]\n list_of_attrs_after_train = [\n \"noise_multiplier\",\n \"sample_rate\",\n \"size_train_dataset\",\n ]\n # For some reason this method is called before ever calling train so\n # at first it doesn't have an accountant\n if hasattr(self, \"accountant\"):\n checkpoint[\"privacy_accountant_state_dict\"] = self.accountant.state_dict()\n list_attrs_to_save += list_of_attrs_after_train\n\n for attr in list_attrs_to_save:\n checkpoint[attr] = getattr(self, attr)\n\n return checkpoint\n\n def _update_from_checkpoint(self, path) -> dict:\n \"\"\"Set self attributes using saved values.\n\n Parameters\n ----------\n path : Path\n Path towards the checkpoint to use.\n\n Returns\n -------\n dict\n The emptied checkpoint.\n \"\"\"\n # One cannot simply call checkpoint = super()._update_from_checkpoint(path)\n # because we have to change the model class if it should be changed\n # (and optimizer) aka if we find a specific key in the checkpoint\n assert (\n path.is_file()\n ), f'Cannot load the model - does not exist {list(path.parent.glob(\"*\"))}'\n checkpoint = torch.load(path, map_location=self._device)\n # For some reason substrafl save and load client before calling train\n if \"privacy_accountant_state_dict\" in checkpoint:\n self.accountant = RDPAccountant()\n self.accountant.load_state_dict(\n checkpoint.pop(\"privacy_accountant_state_dict\")\n )\n self.sample_rate = checkpoint.pop(\"sample_rate\")\n self.size_train_dataset = checkpoint.pop(\"size_train_dataset\")\n self.noise_multiplier = checkpoint.pop(\"noise_multiplier\")\n # The init is messing up the fact that the model has become\n # a grad sampler and the optimizer a DPOptimizer, their classes\n # do not persist between serializations\n # Those lines will allow to load corresponding state_dicts wo errors\n if not isinstance(self._model, GradSampleModule):\n self._model = wrap_model(self._model, grad_sample_mode=\"hooks\")\n\n if not isinstance(self._optimizer, DPOptimizer):\n self._optimizer = DPOptimizer(\n self._optimizer,\n noise_multiplier=self.noise_multiplier,\n max_grad_norm=self.dp_max_grad_norm,\n expected_batch_size=self.batch_size,\n )\n\n self._optimizer.attach_step_hook(\n self.accountant.get_optimizer_hook_fn(self.sample_rate)\n )\n\n self._model.load_state_dict(checkpoint.pop(\"model_state_dict\"))\n\n if self._optimizer is not None:\n self._optimizer.load_state_dict(checkpoint.pop(\"optimizer_state_dict\"))\n\n if self._scheduler is not None:\n self._scheduler.load_state_dict(checkpoint.pop(\"scheduler_state_dict\"))\n\n self._index_generator = checkpoint.pop(\"index_generator\")\n\n if self._device == torch.device(\"cpu\"):\n torch.set_rng_state(checkpoint.pop(\"rng_state\").to(self._device))\n else:\n torch.cuda.set_rng_state(checkpoint.pop(\"rng_state\").to(\"cpu\"))\n\n attr_names = [\n \"dp_max_grad_norm\",\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"num_rounds\",\n \"num_updates\",\n \"num_total_steps\",\n 
\"batch_size\",\n ]\n\n for attr in attr_names:\n setattr(self, attr, checkpoint.pop(attr))\n\n return checkpoint" }, { "identifier": "LogisticRegressionTorch", "path": "fedeca/fedeca_core.py", "snippet": "class LogisticRegressionTorch(nn.Module):\n \"\"\"Pytorch logistic regression class.\"\"\"\n\n def __init__(self, ndim, torch_dtype=torch.float64):\n \"\"\"Initialize Logistic Regression model in PyTorch.\n\n Parameters\n ----------\n ndim : int\n Number of input dimensions.\n torch_dtype : torch.dtype, optional\n Data type for PyTorch tensors, by default torch.float64.\n \"\"\"\n self.torch_dtype = torch_dtype\n self.ndim = ndim\n super(LogisticRegressionTorch, self).__init__()\n self.fc1 = nn.Linear(self.ndim, 1).to(self.torch_dtype)\n # Zero-init as in sklearn\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n\n def forward(self, x, eval=False):\n \"\"\"Perform a forward pass through the Logistic Regression model.\n\n Parameters\n ----------\n x : torch.Tensor\n Input tensor of shape (batch_size, ndim).\n eval : bool, optional\n Set to True during evaluation, by default False.\n\n Returns\n -------\n torch.Tensor\n Predicted probabilities after passing through sigmoid activation.\n \"\"\"\n x = self.fc1(x)\n return torch.sigmoid(x)" }, { "identifier": "Experiment", "path": "fedeca/utils/substrafl_utils.py", "snippet": "class Experiment:\n \"\"\"Experiment class.\"\"\"\n\n def __init__(\n self,\n strategies: list,\n num_rounds_list: list[int],\n ds_client=None,\n train_data_nodes: Union[list[TrainDataNode], None] = None,\n metrics_dicts_list: Union[list[dict], None] = None,\n test_data_nodes: Union[list[TestDataNode], None] = None,\n aggregation_node: Union[AggregationNode, None] = None,\n evaluation_frequency: Union[int, None] = None,\n experiment_folder: str = \"./experiments\",\n clean_models: bool = False,\n fedeca_path: Union[str, None] = None,\n algo_dependencies: Union[list, None] = None,\n ):\n \"\"\"Initialize an experiment.\n\n Parameters\n ----------\n ds_client : fl.client.Client\n Federated Learning client object used to register computations.\n strategies : list\n List of strategies to run.\n train_data_nodes : Union[list[TrainDataNode], None]\n List of data nodes for training. 
If None cannot use the run method\n directly.\n num_rounds_list : list\n List of number of rounds for each strategy.\n metrics_dicts_list : list[dict], optional\n Dict of metric functions, by default None.\n test_data_nodes : list, optional\n List of data nodes for testing, by default None.\n aggregation_node : fl.data.DataNode, optional\n Aggregation node, by default None.\n evaluation_frequency : int, optional\n Frequency of evaluation, by default 1.\n experiment_folder : str, optional\n Folder path for experiment outputs, by default \"./experiments\".\n clean_models : bool, optional\n Whether to clean models after training, by default False.\n fedeca_path : str, optional\n Path to the FedECA package, by default None.\n algo_dependencies : list, optional\n List of algorithm dependencies, by default [].\n \"\"\"\n if metrics_dicts_list is not None:\n assert len(strategies) == len(metrics_dicts_list)\n assert len(num_rounds_list) == len(strategies)\n self.strategies = strategies\n self.metrics_dicts_list = metrics_dicts_list\n self.num_rounds_list = num_rounds_list\n self.ds_client = ds_client\n self.train_data_nodes = train_data_nodes\n self.test_data_nodes = test_data_nodes\n self.simu_mode = False\n\n if self.test_data_nodes is None:\n assert metrics_dicts_list is not None\n if self.train_data_nodes is not None:\n self.test_data_nodes = [\n TestDataNode(\n t.organization_id, t.data_manager_key, t.data_sample_keys, []\n )\n for t in self.train_data_nodes\n ]\n else:\n if metrics_dicts_list and not all(\n [len(t.metric_functions) == 0 for t in self.test_data_nodes]\n ):\n print(\n \"\"\"WARNING: you are passing metrics to test data nodes with existing\n metric_functions this will overwrite them\"\"\"\n )\n print(\n [\n (f\"Client {i}\", t.metric_functions)\n for i, t in enumerate(self.test_data_nodes)\n ]\n )\n\n self.evaluation_frequency = evaluation_frequency\n\n self.aggregation_node = aggregation_node\n self.experiment_folder = experiment_folder\n self.clean_models = clean_models\n\n # Packaging the right dependencies\n if fedeca_path is None:\n fedeca_path = os.getcwd()\n repo_folder = Path(\n git.Repo(fedeca_path, search_parent_directories=True).working_dir\n ).resolve()\n wheel_folder = repo_folder / \"temp\"\n os.makedirs(wheel_folder, exist_ok=True)\n for stale_wheel in wheel_folder.glob(\"fedeca*.whl\"):\n stale_wheel.unlink()\n process = subprocess.Popen(\n f\"python -m build --wheel --outdir {wheel_folder} {repo_folder}\",\n shell=True,\n stdout=subprocess.PIPE,\n )\n process.wait()\n assert process.returncode == 0, \"Failed to build the wheel\"\n wheel_path = next(wheel_folder.glob(\"fedeca*.whl\"))\n if algo_dependencies is None:\n algo_dependencies = []\n\n self.algo_dependencies = Dependency(\n pypi_dependencies=[\"numpy==1.23.1\", \"torch==1.11.0\", \"lifelines\", \"pandas\"]\n + algo_dependencies,\n local_dependencies=[wheel_path],\n )\n\n self.experiment_path = str(Path(self.experiment_folder))\n os.makedirs(self.experiment_path, exist_ok=True)\n self.run_strategies = 0\n self.tasks = {}\n self.compute_plan_keys = []\n self.performances_strategies = []\n\n def fit(\n self,\n data: pd.DataFrame,\n nb_clients: Union[int, None] = None,\n split_method: Union[Callable, str] = \"uniform\",\n split_method_kwargs: Union[Callable, None] = None,\n data_path: Union[str, None] = None,\n backend_type: str = \"subprocess\",\n urls: Union[list[str], None] = None,\n tokens: Union[list[str], None] = None,\n ):\n \"\"\"Fit strategies on global data split across clients.\n\n For test if 
provided we use test_data_nodes from int or the\n train_data_nodes in the latter train=test.\n\n Parameters\n ----------\n data : pd.DataFrame\n The global data to be split has to be a dataframe as we only support\n one opener type.\n nb_clients : Union[int, None], optional\n The number of clients used to split data across, by default None\n split_method : Union[Callable, None], optional\n How to split data across the nb_clients, by default None.\n split_method_kwargs : Union[Callable, None], optional\n Argument of the function used to split data, by default None.\n data_path : Union[str, None]\n Where to store the data on disk when backend is not remote.\n backend_type: str\n The backend to use for substra. Can be either:\n [\"subprocess\", \"docker\", \"remote\"]. Defaults to \"subprocess\".\n urls: Union[list[str], None]\n Urls corresponding to clients API if using remote backend_type.\n Defaults to None.\n tokens: Union[list[str], None]\n Tokens necessary to authenticate each client API if backend_type\n is remote. Defauts to None.\n \"\"\"\n # Reset experiment so that it can fit on a new dataset\n self.reset_experiment()\n\n if data_path is not None:\n self.experiment_path = data_path\n\n # We first have to create the TrainDataNodes objects for this we split\n # the data into nb_clients using split_method\n (\n self.clients,\n self.train_data_nodes,\n test_data_nodes,\n _,\n _,\n ) = split_dataframe_across_clients(\n df=data,\n n_clients=nb_clients,\n split_method=split_method,\n split_method_kwargs=split_method_kwargs,\n backend_type=backend_type,\n data_path=data_path,\n urls=urls,\n tokens=tokens,\n )\n if self.test_data_nodes is None:\n self.test_data_nodes = test_data_nodes\n self.run()\n\n def run(self, num_strategies_to_run=None):\n \"\"\"Run the experiment.\n\n Parameters\n ----------\n num_strategies_to_run : int, optional\n Number of strategies to run, by default None.\n \"\"\"\n assert (\n self.train_data_nodes is not None\n ), \"you have to define train_data_nodes first before running\"\n assert (\n self.test_data_nodes is not None\n ), \"you have to define test_data_nodes first before running\"\n if num_strategies_to_run is None:\n num_strategies_to_run = len(self.strategies) - self.run_strategies\n assert (self.run_strategies + num_strategies_to_run) <= len(\n self.strategies\n ), f\"\"\"You cannot run {num_strategies_to_run} strategies more there is only\n {len(self.strategies)} strategies and you have already run {self.run_strategies}\n of them.\"\"\"\n # If no client is given we take the first one\n if self.ds_client is None:\n self.ds_client = self.clients[list(self.clients.keys())[0]]\n\n # If no AggregationNode is given we take the first one\n if self.aggregation_node is None:\n print(\"Using the first client as a server.\")\n kwargs_agg_node = {\n \"organization_id\": self.train_data_nodes[0].organization_id\n }\n self.aggregation_node = AggregationNode(**kwargs_agg_node)\n\n if not hasattr(self, \"experiment_kwargs\"):\n self.experiment_kwargs = {\n \"experiment_folder\": self.experiment_path,\n \"clean_models\": self.clean_models,\n \"dependencies\": self.algo_dependencies,\n \"client\": self.ds_client,\n }\n if hasattr(self.ds_client, \"is_simu\"):\n self.simu_mode = self.ds_client.is_simu\n\n # inelegant but cannot slice on a zip object\n strategies = self.strategies[\n self.run_strategies : (self.run_strategies + num_strategies_to_run)\n ] # noqa: E203\n metrics_dicts_list = self.metrics_dicts_list[\n self.run_strategies : (\n self.run_strategies + 
num_strategies_to_run\n ) # noqa: E203\n ]\n num_rounds_list = self.num_rounds_list[\n self.run_strategies : (\n self.run_strategies + num_strategies_to_run\n ) # noqa: E203\n ]\n for i, (strategy, metrics_dict, num_rounds) in enumerate(\n zip(strategies, metrics_dicts_list, num_rounds_list)\n ):\n for t in self.test_data_nodes:\n t.metric_functions = metrics_dict\n\n current_kwargs = self.experiment_kwargs\n current_kwargs[\"strategy\"] = strategy\n current_kwargs[\"num_rounds\"] = num_rounds\n current_kwargs[\"train_data_nodes\"] = self.train_data_nodes\n current_kwargs[\"aggregation_node\"] = self.aggregation_node\n # Evaluation frequency depend on current strategy\n # If None evaluate once at the end of the strategy\n if self.evaluation_frequency is None:\n evaluation_strategy = EvaluationStrategy(\n test_data_nodes=self.test_data_nodes,\n eval_rounds=[num_rounds_list[i]],\n )\n else:\n evaluation_strategy = EvaluationStrategy(\n test_data_nodes=self.test_data_nodes,\n eval_frequency=self.evaluation_frequency[i],\n )\n current_kwargs[\"evaluation_strategy\"] = evaluation_strategy\n current_kwargs[\"simu_mode\"] = self.simu_mode\n current_kwargs[\"name\"] = f\"Fedeca: {strategy.__class__.__name__}\"\n xp_output = execute_experiment(**current_kwargs)\n\n if self.simu_mode:\n scores = [t.scores for t in self.test_data_nodes]\n robust_cox_variance = False\n for idx, s in enumerate(scores):\n print(f\"====Client {idx}====\")\n try:\n print(s[-1])\n except IndexError:\n robust_cox_variance = True\n print(\"No metric\")\n # TODO Check that it is well formatted it's probably not\n self.performances_strategies.append(pd.DataFrame(xp_output))\n # Hacky hacky hack\n if robust_cox_variance:\n xp_output = self.train_data_nodes\n else:\n xp_output = self.train_data_nodes[0]\n\n self.compute_plan_keys.append(xp_output)\n\n if not (self.simu_mode):\n self.tasks[self.compute_plan_keys[i].key] = {}\n tasks = self.ds_client.list_task(\n filters={\"compute_plan_key\": [self.compute_plan_keys[i].key]}\n )[::-1]\n tasks_names = [t.function.name for t in tasks]\n self.tasks[self.compute_plan_keys[i].key][\"tasks\"] = tasks\n self.tasks[self.compute_plan_keys[i].key][\"tasks_names\"] = tasks_names\n self.tasks[self.compute_plan_keys[i].key][\"num_tasks\"] = len(tasks)\n\n self.run_strategies += 1\n\n def get_outmodel(self, task_name, strategy_idx=0, idx_task=0):\n \"\"\"Get the output model.\n\n Parameters\n ----------\n task_name : str\n Name of the task.\n strategy_idx : int, optional\n Index of the strategy, by default 0.\n idx_task : int, optional\n Index of the task, by default 0.\n \"\"\"\n assert not (self.simu_mode), \"This function cannot be used in simu mode\"\n\n # We get all matches and order them chronologically\n tasks_dict_from_strategy = self.tasks[self.compute_plan_keys[strategy_idx].key]\n return get_outmodel_function(\n task_name, idx_task=idx_task, tasks_dict=tasks_dict_from_strategy\n )\n\n def reset_experiment(self):\n \"\"\"Reset the state of the object.\n\n So it can be fit with a new dataset.\n \"\"\"\n self.run_strategies = 0\n self.tasks = {}\n self.compute_plan_keys = []\n self.performances_strategies = []\n self.train_data_nodes = None\n self.test_data_nodes = None" }, { "identifier": "make_substrafl_torch_dataset_class", "path": "fedeca/utils/substrafl_utils.py", "snippet": "def make_substrafl_torch_dataset_class(\n target_cols,\n event_col,\n duration_col,\n dtype=\"float64\",\n return_torch_tensors=False,\n):\n \"\"\"Create a custom SubstraflTorchDataset class for survival 
analysis.\n\n Parameters\n ----------\n target_cols : list\n List of target columns.\n event_col : str\n Name of the event column.\n duration_col : str\n Name of the duration column.\n dtype : str, optional\n Data type, by default \"float64\".\n return_torch_tensors : bool, optional\n Returns torch.Tensor. Defaults to False.\n\n Returns\n -------\n type\n Custom SubstraflTorchDataset class.\n \"\"\"\n assert len(target_cols) == 1 or all(\n [t in [event_col, duration_col] for t in target_cols]\n )\n if len(target_cols) == 1:\n print(f\"Making a dataset class to fit a model to predict {target_cols[0]}\")\n columns_to_drop = [event_col, duration_col]\n elif len(target_cols) == 2:\n assert set(target_cols) == set(\n [event_col, duration_col]\n ), \"Your targets should be event_col and duration_col\"\n # DO NOT MODIFY THIS LINE !!!!!\n target_cols = [duration_col, event_col]\n columns_to_drop = []\n\n class MySubstraflTorchDataset(SubstraflTorchDataset):\n def __init__(self, datasamples, is_inference):\n super().__init__(\n datasamples=datasamples,\n is_inference=is_inference,\n target_columns=target_cols,\n columns_to_drop=columns_to_drop,\n dtype=dtype,\n return_torch_tensors=return_torch_tensors,\n )\n\n return MySubstraflTorchDataset" }, { "identifier": "make_accuracy_function", "path": "fedeca/utils/substrafl_utils.py", "snippet": "def make_accuracy_function(treatment_col: str):\n \"\"\"Build accuracy function.\n\n Parameters\n ----------\n treatment_col: str,\n Column name for the treatment allocation.\n \"\"\"\n\n def accuracy(datasamples, predictions_path):\n y_true = datasamples[treatment_col]\n if isinstance(predictions_path, str) or isinstance(predictions_path, Path):\n y_pred = np.load(predictions_path)\n else:\n y_pred = predictions_path\n return accuracy_score(y_true, y_pred > 0.5)\n\n return accuracy" }, { "identifier": "CoxData", "path": "fedeca/utils/survival_utils.py", "snippet": "class CoxData:\n \"\"\"Simulate Cox data.\n\n This class simulates survival data following Cox model assumptions.\n \"\"\"\n\n def __init__(\n self,\n n_samples: int = 1000,\n ndim: int = 10,\n features_type: Literal[\n \"cov_toeplitz\",\n \"cov_uniform\",\n \"indep_gauss\",\n ] = \"cov_toeplitz\",\n cate: float | Literal[\"random\", \"linear\"] = 1.0,\n propensity: Literal[\"constant\", \"linear\"] = \"constant\",\n prop_treated: float = 0.5,\n overlap: float = 0.0,\n cov_corr: float = 0.5,\n scale_t: float = 1.0,\n shape_t: float = 1.0,\n censoring_factor: float = 0.5,\n percent_ties: Optional[float] = None,\n random_censoring: bool = False,\n seed: _SeedType = None,\n standardize_features: bool = True,\n dtype: Literal[\"float32\", \"float64\"] = \"float64\",\n ):\n r\"\"\"Cox Data generator class.\n\n This class generates data according to a Cox proportional hazards model\n in continuous time as follows:\n .. math::\n S(t|x) = P(T > t | X=x)\n \\\\lambda(t|x) = \\\\frac{d \\\\log S(t|x)}{dt}\n \\\\lambda(t|x) = \\\\lambda_0(t)e^{\\\\beta^T x}\n \\\\Lambda_0(t|x) = \\\\int_0^t \\\\lambda_0(u)du = (\\\\frac{t}{s})^k\n X \\\\sim \\\\mathcal{N}(0, C)\n \\\\beta \\\\sim \\\\mathcal{N}(0, I)\n\n Parameters\n ----------\n n_samples: int, optional\n Number of samples to generate. 
Defaults to 1000\n ndim: int, optional\n Number of features, defaults to 10.\n features_type: `{\"cov_toeplitz\", \"cov_uniform\", \"indep_gauss\"}`, optional\n cate: {float, `{\"random\", \"linear\"}`, Callable}\n The way to assign treatment effect (hazard ratio) to samples.\n * \"float\": Constant hazard ratio for all samples.\n * \"random\": Hazard ratio follows log-normal distribution.\n * \"linear\": Hazard ratio depends on a linear combination of\n features with random coefficients.\n Defaults to 1.0 (no treatment effect).\n propensity: {`{\"constant\", \"linear\"}`, Callable}\n The way to assign propensity scores (probabilities of being treated)\n to samples.\n * \"linear\": Propensity scores depend on a linear combination of\n features with random coefficients.\n * \"constant\": All propensity scores take the value of the constant\n defined by the parameter `prop_treated`.\n Defaults to \"constant\".\n cov_corr: float, optional\n The correlation of the covariance matrix.\n scale_t: float, optional\n Scale parameter `s` in the equations above. Defaults to `1.0`.\n shape_t: float, optional\n Shape parameter `k` in the equations above. Defaults to `1.0`.\n censoring_factor: float, optional\n Parameter used to determine the probability of being censored\n (with respect to the median). Defaults to `0.5`.\n percent_ties: float, optional\n Parameter that control the percentage of samples who have the same outcome.\n Defaults to None.\n random_censoring: bool, optional\n Whether to censor completely independently of the rest or not.\n When true, censors samples with probability censoring_factor.\n When false, samples are censored if the drawn event times\n (drawn from the Cox model) is smaller than an independent\n exponential variable with scale factor\n `censoring_factor * mean_time`, where `mean_time`\n is the empirical mean of drawn event times.\n Defaults to False.\n seed: {None, int, Sequence[int], SeedSequence, BitGenerator, Generator},\n optional\n The seed for reproducibility. Defaults to None.\n standardize_features: bool, optional\n Whether to standardize features or not. Defaults to True.\n dtype : `{\"float64\", \"float32\"}`, default=\"float64\"\n Type of the arrays used.\n \"\"\"\n self.n_samples = n_samples\n self.ndim = ndim\n self.features_type: Final = features_type\n self.rng = np.random.default_rng(seed)\n self.prop_treated = prop_treated\n self.overlap = overlap\n self.cate = cate\n self.propensity = propensity\n self.cov_corr = cov_corr\n self.scale_t = scale_t\n self.shape_t = shape_t\n self.censoring_factor = censoring_factor\n self.random_censoring = random_censoring\n self.standardize_features = standardize_features\n self.dtype: Final = dtype\n self.coeffs = None\n self.percent_ties = percent_ties\n self.average_treatment_effect_ = None\n self.probability_treated = None\n\n def standardize_data(self, features: np.ndarray):\n \"\"\"Standardize data. 
Make data reduced centered.\n\n Standardize the data by substracting the mean of each columns\n and dividing by the standard deviation.\n\n Parameters\n ----------\n features : np.ndarray\n Features to standardize.\n\n Returns\n -------\n np.ndarray\n Normalized features.\n \"\"\"\n features -= features.mean(axis=0)\n features /= features.std(axis=0)\n return features\n\n def generate_data(\n self,\n n_samples: Optional[int] = None,\n seed: _SeedType = None,\n use_cate: bool = True,\n ):\n \"\"\"Generate final survival data.\n\n Use the collection of methods of the class to\n generate data following Cox assumptions.\n\n Returns\n -------\n tuple\n A tuple of np.ndarrays.\n\n Raises\n ------\n ValueError\n If `propensity` is neither \"constant\" nor \"linear\".\n ValueError\n If `cate` is neither \"linear\", \"random\" nor a constant type int or float.\n \"\"\"\n if n_samples is None:\n n_samples = self.n_samples\n if seed is None:\n seed = self.rng\n rng = np.random.default_rng(seed)\n\n if self.features_type == \"cov_uniform\":\n X = features_normal_cov_uniform(\n n_samples, self.ndim, dtype=self.dtype, seed=rng\n )\n elif self.features_type == \"indep_gauss\":\n X = rng.standard_normal(size=(n_samples, self.ndim)).astype(self.dtype)\n else:\n X = features_normal_cov_toeplitz(\n n_samples, self.ndim, self.cov_corr, dtype=self.dtype, seed=rng\n )\n if self.standardize_features:\n X = self.standardize_data(X)\n\n if self.propensity == \"constant\":\n treat_alloc = random_treatment_allocation(\n n_samples, self.prop_treated, seed=rng\n )\n propensity_scores = np.repeat(self.prop_treated, n_samples)\n\n elif self.propensity == \"linear\":\n func_propensity = linear_propensity(\n ndim=self.ndim,\n overlap=self.overlap,\n prop_treated=self.prop_treated,\n seed=rng,\n )\n propensity_scores = np.apply_along_axis(func_propensity, -1, X)\n treat_alloc = rng.binomial(1, propensity_scores)\n else:\n raise ValueError(\"propensity must be either `constant` or `linear`\")\n\n self.coeffs = rng.normal(size=(self.ndim,)).astype(self.dtype)\n u = X.dot(self.coeffs)\n if use_cate:\n if self.cate == \"linear\":\n func_cate = linear_cate(ndim=self.ndim, seed=rng)\n elif self.cate == \"random\":\n func_cate = random_cate(seed=rng)\n elif isinstance(self.cate, (int, float)):\n func_cate = constant_cate(self.cate)\n else:\n raise ValueError(\n \"\"\"cate must be either `linear`, `random` or a constant type\n int or float\"\"\"\n )\n\n cate_vector = np.apply_along_axis(func_cate, -1, X)\n self.average_treatment_effect_ = np.mean(cate_vector[treat_alloc == 1])\n self.probability_treated = cate_vector\n u += treat_alloc * np.log(cate_vector)\n # Simulation of true times\n time_hazard_baseline = -np.log(\n rng.uniform(0, 1.0, size=n_samples).astype(self.dtype)\n )\n time_cox_unscaled = time_hazard_baseline * np.exp(-u)\n times = self.scale_t * time_cox_unscaled ** (1.0 / self.shape_t)\n\n # induce samples with same times\n if self.percent_ties is not None:\n nb_ties_target = int(self.percent_ties * n_samples)\n if nb_ties_target >= 2:\n # sklearn not supporting generator yet, pass int to random_state\n # ref: https://github.com/scikit-learn/scikit-learn/issues/16988\n seed_seq = rng.bit_generator._seed_seq.spawn(1)[0] # type: ignore\n random_state = seed_seq.generate_state(1)[0]\n original_times = copy.deepcopy(times)\n # We progressively reduce the number of bins until there are\n # only 2 bins starting with npoints - 1 bins\n reached = False\n for nbins in range(n_samples - 1, 1, -1):\n discretizer = 
KBinsDiscretizer(\n n_bins=nbins,\n encode=\"ordinal\",\n strategy=\"quantile\",\n random_state=random_state,\n )\n times = discretizer.fit_transform(original_times.reshape((-1, 1)))\n nb_ties_reached = n_samples - len(np.unique(times))\n if (nb_ties_reached - nb_ties_target) >= 0:\n reached = True\n break\n if not reached:\n raise ValueError(\"This should not happen, lower percent_ties\")\n times = times.reshape((-1))\n\n else:\n raise ValueError(\"Choose a larger number of ties\")\n\n avg_time = times.mean()\n\n # Simulation of the censoring times. times is returned in absolute value\n if self.random_censoring:\n censoring = rng.uniform(size=n_samples) < self.censoring_factor\n times[censoring] = [rng.uniform(0, t) for t in times[censoring].tolist()]\n censoring = censoring.astype(\"uint8\")\n else:\n c_sampled = rng.exponential(\n scale=self.censoring_factor * avg_time, size=n_samples\n ).astype(self.dtype)\n\n censoring = (times > c_sampled).astype(\"uint8\")\n times[censoring] = np.minimum(times, c_sampled)\n\n return X, times, censoring, treat_alloc, propensity_scores\n\n def generate_dataframe(\n self,\n n_samples: Optional[int] = None,\n prefix: str = \"X_\",\n duration_col: str = \"time\",\n event_col: str = \"event\",\n treated_col: str = \"treatment\",\n ps_col: str = \"propensity_scores\",\n seed: _SeedType = None,\n ):\n \"\"\"Generate dataframe.\"\"\"\n (\n covariates,\n times,\n censoring,\n treatments,\n propensity_scores,\n ) = self.generate_data(n_samples, seed=seed)\n data = pd.DataFrame(covariates).add_prefix(prefix)\n data[duration_col] = times\n data[event_col] = 1 - censoring\n data[treated_col] = treatments\n data[ps_col] = propensity_scores\n return data" }, { "identifier": "make_categorical", "path": "fedeca/utils/survival_utils.py", "snippet": "def make_categorical(X, up_to: int = 25, seed: _SeedType = None):\n \"\"\"Convert continuous features in a dataset to categorical features.\n\n This function takes a dataset matrix `X` and converts its first `up_to` columns\n (features) into categorical features using the KBinsDiscretizer method.\n It performs min-max scaling on each feature before discretization.\n\n Parameters\n ----------\n X : np.ndarray\n Input dataset matrix of shape (n_samples, n_features).\n up_to : int, optional\n Number of columns to convert to categorical features, by default 25.\n seed : int or None, optional\n Seed for the random number generator, by default None.\n\n Returns\n -------\n np.ndarray, np.ndarray\n Two arrays: `Xleft` containing the modified categorical features\n and `Xright` containing the remaining original features.\n \"\"\"\n rng = np.random.default_rng(seed)\n Xleft = X[:, :up_to]\n Xright = X[:, up_to:]\n mm_normalizer = MinMaxScaler()\n nbins_vector = rng.integers(2, 10, size=up_to)\n for j, nbins in enumerate(nbins_vector):\n # sklearn not supporting generator yet, pass int to random_state\n # ref: https://github.com/scikit-learn/scikit-learn/issues/16988\n seed_seq = rng.bit_generator._seed_seq.spawn(1)[0] # type: ignore\n random_state = seed_seq.generate_state(1)[0]\n discretizer = KBinsDiscretizer(\n n_bins=nbins, encode=\"ordinal\", random_state=random_state\n )\n Xleft[:, j] = mm_normalizer.fit_transform(Xleft[:, j][:, None])[:, 0]\n Xleft[:, j] = discretizer.fit_transform(Xleft[:, j][:, None])[:, 0]\n return Xleft, Xright" } ]
import sys import numpy as np import pandas as pd import torch import torch.nn as nn from itertools import product from sklearn.metrics import accuracy_score from substrafl.algorithms.pytorch import TorchNewtonRaphsonAlgo from substrafl.model_loading import download_algo_state from substrafl.strategies import FedAvg, NewtonRaphson from torch.optim import SGD from fedeca.algorithms.torch_dp_fed_avg_algo import TorchDPFedAvgAlgo from fedeca.fedeca_core import LogisticRegressionTorch from fedeca.utils import ( Experiment, make_accuracy_function, make_substrafl_torch_dataset_class, ) from fedeca.utils.survival_utils import CoxData, make_categorical
11,812
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical Xcat, Xcont = make_categorical(X, up_to=0) # Build the final dataframe using appropriate column names and adding missing values cols_dict = {} X = np.concatenate((Xcat, Xcont), axis=1) for i in range(Xcat.shape[1] + Xcont.shape[1]): currentX = X[:, i].astype("float32") mask_na = rng.uniform(0, 1, X.shape[0]) > (1.0 - na_proportion) currentX[mask_na] = np.nan if i < Xcat.shape[1]: colname = "cat_col" else: colname = "col" i -= Xcat.shape[1] cols_dict[f"{colname}_{i}"] = currentX # The absolute value is superfluous but just to be sure cols_dict["T"] = np.abs(T) cols_dict["E"] = (1.0 - C).astype("uint8") cols_dict["treated"] = treated df = pd.DataFrame(cols_dict) # Final cast of categorical columns that was impossible due to nan in numpy for i in range(Xcat.shape[1]): df[f"cat_col_{i}"] = df[f"cat_col_{i}"].astype("Int64") results_all_reps = [] edelta_list = list(product(epsilons, deltas)) accuracy_metrics_dict = {"accuracy": make_accuracy_function("treated")} # We set model and dataloaders to be the same for each rep
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical Xcat, Xcont = make_categorical(X, up_to=0) # Build the final dataframe using appropriate column names and adding missing values cols_dict = {} X = np.concatenate((Xcat, Xcont), axis=1) for i in range(Xcat.shape[1] + Xcont.shape[1]): currentX = X[:, i].astype("float32") mask_na = rng.uniform(0, 1, X.shape[0]) > (1.0 - na_proportion) currentX[mask_na] = np.nan if i < Xcat.shape[1]: colname = "cat_col" else: colname = "col" i -= Xcat.shape[1] cols_dict[f"{colname}_{i}"] = currentX # The absolute value is superfluous but just to be sure cols_dict["T"] = np.abs(T) cols_dict["E"] = (1.0 - C).astype("uint8") cols_dict["treated"] = treated df = pd.DataFrame(cols_dict) # Final cast of categorical columns that was impossible due to nan in numpy for i in range(Xcat.shape[1]): df[f"cat_col_{i}"] = df[f"cat_col_{i}"].astype("Int64") results_all_reps = [] edelta_list = list(product(epsilons, deltas)) accuracy_metrics_dict = {"accuracy": make_accuracy_function("treated")} # We set model and dataloaders to be the same for each rep
logreg_model = LogisticRegressionTorch(NDIM, torch.float32)
1
2023-11-27 18:01:37+00:00
16k
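To illustrate how the pieces of the example above fit together: the gold next line instantiates the propensity model, after which the dataset-class factory described in the context snippets can be bound to the columns the script creates ("treated", "E", "T"). A minimal sketch, with the keyword usage inferred from the factory's docstring rather than copied from the fedeca repository:

    logreg_model = LogisticRegressionTorch(NDIM, torch.float32)  # the row's gold next line
    propensity_dataset = make_substrafl_torch_dataset_class(
        target_cols=["treated"],  # the propensity model predicts treatment allocation
        event_col="E",            # event indicator column built in the script above
        duration_col="T",         # survival-time column built in the script above
        dtype="float32",
    )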
aliyun/pai-python-sdk
pai/api/training_job.py
[ { "identifier": "PaginatedResult", "path": "pai/api/base.py", "snippet": "class PaginatedResult(object):\n \"\"\"A class represent response of a pagination call to PAI service.\"\"\"\n\n items: List[Union[Dict[str, Any], str]] = None\n total_count: int = None\n\n def __init__(self, items: List[Union[Dict[str, Any], str]], total_count: int):\n self.items = items\n self.total_count = total_count" }, { "identifier": "ServiceName", "path": "pai/api/base.py", "snippet": "class ServiceName(object):\n # Service provided by PAI.\n PAI_DLC = \"pai-dlc\"\n PAI_EAS = \"pai-eas\"\n PAI_WORKSPACE = \"aiworkspace\"\n PAI_STUDIO = \"pai\"\n PAIFLOW = \"paiflow\"\n # Other services provided by Alibaba Cloud.\n STS = \"sts\"" }, { "identifier": "WorkspaceScopedResourceAPI", "path": "pai/api/base.py", "snippet": "class WorkspaceScopedResourceAPI(with_metaclass(ABCMeta, ResourceAPI)):\n \"\"\"Workspace Scoped Resource API.\"\"\"\n\n # A workspace_id placeholder indicate the workspace_id field of\n # the request should not be replaced.\n workspace_id_none_placeholder = \"WORKSPACE_ID_NONE_PLACEHOLDER\"\n\n # Default parameter name for request object.\n default_param_name_for_request = \"request\"\n\n def __init__(self, workspace_id, acs_client, **kwargs):\n super(WorkspaceScopedResourceAPI, self).__init__(\n acs_client=acs_client, **kwargs\n )\n self.workspace_id = workspace_id\n\n def _do_request(self, method_, **kwargs):\n request = kwargs.get(self.default_param_name_for_request)\n\n if not request:\n # Sometimes, request object is not named as \"request\", we need to find it.\n for param_name, param_value in kwargs.items():\n if isinstance(param_value, TeaModel) and type(\n param_value\n ).__name__.endswith(\"Request\"):\n request = param_value\n break\n\n # Automatically configure the workspace ID for the request\n if request and hasattr(request, \"workspace_id\"):\n if request.workspace_id is None:\n request.workspace_id = self.workspace_id\n elif (\n request.workspace_id == self.workspace_id_none_placeholder\n or not request.workspace_id\n ):\n # request.workspace_id is 0 or request.workspace_id is empty string,\n # we do not inject workspace_id of the scope.\n request.workspace_id = None\n return super(WorkspaceScopedResourceAPI, self)._do_request(method_, **kwargs)" }, { "identifier": "AlgorithmSpec", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class AlgorithmSpec(TeaModel):\n def __init__(\n self,\n code_dir: Location = None,\n command: List[str] = None,\n compute_resource: AlgorithmSpecComputeResource = None,\n customization: AlgorithmSpecCustomization = None,\n hyper_parameters: List[HyperParameterDefinition] = None,\n image: str = None,\n input_channels: List[Channel] = None,\n job_type: str = None,\n metric_definitions: List[MetricDefinition] = None,\n output_channels: List[Channel] = None,\n progress_definitions: AlgorithmSpecProgressDefinitions = None,\n resource_requirements: List[ConditionExpression] = None,\n supported_instance_types: List[str] = None,\n supports_distributed_training: bool = None,\n ):\n self.code_dir = code_dir\n self.command = command\n self.compute_resource = compute_resource\n self.customization = customization\n self.hyper_parameters = hyper_parameters\n self.image = image\n self.input_channels = input_channels\n self.job_type = job_type\n self.metric_definitions = metric_definitions\n self.output_channels = output_channels\n self.progress_definitions = progress_definitions\n self.resource_requirements = resource_requirements\n 
self.supported_instance_types = supported_instance_types\n self.supports_distributed_training = supports_distributed_training\n\n def validate(self):\n if self.code_dir:\n self.code_dir.validate()\n if self.compute_resource:\n self.compute_resource.validate()\n if self.customization:\n self.customization.validate()\n if self.hyper_parameters:\n for k in self.hyper_parameters:\n if k:\n k.validate()\n if self.input_channels:\n for k in self.input_channels:\n if k:\n k.validate()\n if self.metric_definitions:\n for k in self.metric_definitions:\n if k:\n k.validate()\n if self.output_channels:\n for k in self.output_channels:\n if k:\n k.validate()\n if self.progress_definitions:\n self.progress_definitions.validate()\n if self.resource_requirements:\n for k in self.resource_requirements:\n if k:\n k.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.code_dir is not None:\n result['CodeDir'] = self.code_dir.to_map()\n if self.command is not None:\n result['Command'] = self.command\n if self.compute_resource is not None:\n result['ComputeResource'] = self.compute_resource.to_map()\n if self.customization is not None:\n result['Customization'] = self.customization.to_map()\n result['HyperParameters'] = []\n if self.hyper_parameters is not None:\n for k in self.hyper_parameters:\n result['HyperParameters'].append(k.to_map() if k else None)\n if self.image is not None:\n result['Image'] = self.image\n result['InputChannels'] = []\n if self.input_channels is not None:\n for k in self.input_channels:\n result['InputChannels'].append(k.to_map() if k else None)\n if self.job_type is not None:\n result['JobType'] = self.job_type\n result['MetricDefinitions'] = []\n if self.metric_definitions is not None:\n for k in self.metric_definitions:\n result['MetricDefinitions'].append(k.to_map() if k else None)\n result['OutputChannels'] = []\n if self.output_channels is not None:\n for k in self.output_channels:\n result['OutputChannels'].append(k.to_map() if k else None)\n if self.progress_definitions is not None:\n result['ProgressDefinitions'] = self.progress_definitions.to_map()\n result['ResourceRequirements'] = []\n if self.resource_requirements is not None:\n for k in self.resource_requirements:\n result['ResourceRequirements'].append(k.to_map() if k else None)\n if self.supported_instance_types is not None:\n result['SupportedInstanceTypes'] = self.supported_instance_types\n if self.supports_distributed_training is not None:\n result['SupportsDistributedTraining'] = self.supports_distributed_training\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('CodeDir') is not None:\n temp_model = Location()\n self.code_dir = temp_model.from_map(m['CodeDir'])\n if m.get('Command') is not None:\n self.command = m.get('Command')\n if m.get('ComputeResource') is not None:\n temp_model = AlgorithmSpecComputeResource()\n self.compute_resource = temp_model.from_map(m['ComputeResource'])\n if m.get('Customization') is not None:\n temp_model = AlgorithmSpecCustomization()\n self.customization = temp_model.from_map(m['Customization'])\n self.hyper_parameters = []\n if m.get('HyperParameters') is not None:\n for k in m.get('HyperParameters'):\n temp_model = HyperParameterDefinition()\n self.hyper_parameters.append(temp_model.from_map(k))\n if m.get('Image') is not None:\n self.image = m.get('Image')\n self.input_channels = []\n if m.get('InputChannels') is not None:\n for k in m.get('InputChannels'):\n 
temp_model = Channel()\n self.input_channels.append(temp_model.from_map(k))\n if m.get('JobType') is not None:\n self.job_type = m.get('JobType')\n self.metric_definitions = []\n if m.get('MetricDefinitions') is not None:\n for k in m.get('MetricDefinitions'):\n temp_model = MetricDefinition()\n self.metric_definitions.append(temp_model.from_map(k))\n self.output_channels = []\n if m.get('OutputChannels') is not None:\n for k in m.get('OutputChannels'):\n temp_model = Channel()\n self.output_channels.append(temp_model.from_map(k))\n if m.get('ProgressDefinitions') is not None:\n temp_model = AlgorithmSpecProgressDefinitions()\n self.progress_definitions = temp_model.from_map(m['ProgressDefinitions'])\n self.resource_requirements = []\n if m.get('ResourceRequirements') is not None:\n for k in m.get('ResourceRequirements'):\n temp_model = ConditionExpression()\n self.resource_requirements.append(temp_model.from_map(k))\n if m.get('SupportedInstanceTypes') is not None:\n self.supported_instance_types = m.get('SupportedInstanceTypes')\n if m.get('SupportsDistributedTraining') is not None:\n self.supports_distributed_training = m.get('SupportsDistributedTraining')\n return self" }, { "identifier": "CreateTrainingJobRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequest(TeaModel):\n def __init__(\n self,\n algorithm_name: str = None,\n algorithm_provider: str = None,\n algorithm_spec: AlgorithmSpec = None,\n algorithm_version: str = None,\n code_dir: Location = None,\n compute_resource: CreateTrainingJobRequestComputeResource = None,\n hyper_parameters: List[CreateTrainingJobRequestHyperParameters] = None,\n input_channels: List[CreateTrainingJobRequestInputChannels] = None,\n labels: List[CreateTrainingJobRequestLabels] = None,\n output_channels: List[CreateTrainingJobRequestOutputChannels] = None,\n role_arn: str = None,\n scheduler: CreateTrainingJobRequestScheduler = None,\n training_job_description: str = None,\n training_job_name: str = None,\n user_vpc: CreateTrainingJobRequestUserVpc = None,\n workspace_id: str = None,\n ):\n self.algorithm_name = algorithm_name\n self.algorithm_provider = algorithm_provider\n self.algorithm_spec = algorithm_spec\n self.algorithm_version = algorithm_version\n self.code_dir = code_dir\n self.compute_resource = compute_resource\n self.hyper_parameters = hyper_parameters\n self.input_channels = input_channels\n self.labels = labels\n self.output_channels = output_channels\n self.role_arn = role_arn\n self.scheduler = scheduler\n self.training_job_description = training_job_description\n self.training_job_name = training_job_name\n self.user_vpc = user_vpc\n self.workspace_id = workspace_id\n\n def validate(self):\n if self.algorithm_spec:\n self.algorithm_spec.validate()\n if self.code_dir:\n self.code_dir.validate()\n if self.compute_resource:\n self.compute_resource.validate()\n if self.hyper_parameters:\n for k in self.hyper_parameters:\n if k:\n k.validate()\n if self.input_channels:\n for k in self.input_channels:\n if k:\n k.validate()\n if self.labels:\n for k in self.labels:\n if k:\n k.validate()\n if self.output_channels:\n for k in self.output_channels:\n if k:\n k.validate()\n if self.scheduler:\n self.scheduler.validate()\n if self.user_vpc:\n self.user_vpc.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.algorithm_name is not None:\n result['AlgorithmName'] = self.algorithm_name\n if self.algorithm_provider is 
not None:\n result['AlgorithmProvider'] = self.algorithm_provider\n if self.algorithm_spec is not None:\n result['AlgorithmSpec'] = self.algorithm_spec.to_map()\n if self.algorithm_version is not None:\n result['AlgorithmVersion'] = self.algorithm_version\n if self.code_dir is not None:\n result['CodeDir'] = self.code_dir.to_map()\n if self.compute_resource is not None:\n result['ComputeResource'] = self.compute_resource.to_map()\n result['HyperParameters'] = []\n if self.hyper_parameters is not None:\n for k in self.hyper_parameters:\n result['HyperParameters'].append(k.to_map() if k else None)\n result['InputChannels'] = []\n if self.input_channels is not None:\n for k in self.input_channels:\n result['InputChannels'].append(k.to_map() if k else None)\n result['Labels'] = []\n if self.labels is not None:\n for k in self.labels:\n result['Labels'].append(k.to_map() if k else None)\n result['OutputChannels'] = []\n if self.output_channels is not None:\n for k in self.output_channels:\n result['OutputChannels'].append(k.to_map() if k else None)\n if self.role_arn is not None:\n result['RoleArn'] = self.role_arn\n if self.scheduler is not None:\n result['Scheduler'] = self.scheduler.to_map()\n if self.training_job_description is not None:\n result['TrainingJobDescription'] = self.training_job_description\n if self.training_job_name is not None:\n result['TrainingJobName'] = self.training_job_name\n if self.user_vpc is not None:\n result['UserVpc'] = self.user_vpc.to_map()\n if self.workspace_id is not None:\n result['WorkspaceId'] = self.workspace_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('AlgorithmName') is not None:\n self.algorithm_name = m.get('AlgorithmName')\n if m.get('AlgorithmProvider') is not None:\n self.algorithm_provider = m.get('AlgorithmProvider')\n if m.get('AlgorithmSpec') is not None:\n temp_model = AlgorithmSpec()\n self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec'])\n if m.get('AlgorithmVersion') is not None:\n self.algorithm_version = m.get('AlgorithmVersion')\n if m.get('CodeDir') is not None:\n temp_model = Location()\n self.code_dir = temp_model.from_map(m['CodeDir'])\n if m.get('ComputeResource') is not None:\n temp_model = CreateTrainingJobRequestComputeResource()\n self.compute_resource = temp_model.from_map(m['ComputeResource'])\n self.hyper_parameters = []\n if m.get('HyperParameters') is not None:\n for k in m.get('HyperParameters'):\n temp_model = CreateTrainingJobRequestHyperParameters()\n self.hyper_parameters.append(temp_model.from_map(k))\n self.input_channels = []\n if m.get('InputChannels') is not None:\n for k in m.get('InputChannels'):\n temp_model = CreateTrainingJobRequestInputChannels()\n self.input_channels.append(temp_model.from_map(k))\n self.labels = []\n if m.get('Labels') is not None:\n for k in m.get('Labels'):\n temp_model = CreateTrainingJobRequestLabels()\n self.labels.append(temp_model.from_map(k))\n self.output_channels = []\n if m.get('OutputChannels') is not None:\n for k in m.get('OutputChannels'):\n temp_model = CreateTrainingJobRequestOutputChannels()\n self.output_channels.append(temp_model.from_map(k))\n if m.get('RoleArn') is not None:\n self.role_arn = m.get('RoleArn')\n if m.get('Scheduler') is not None:\n temp_model = CreateTrainingJobRequestScheduler()\n self.scheduler = temp_model.from_map(m['Scheduler'])\n if m.get('TrainingJobDescription') is not None:\n self.training_job_description = m.get('TrainingJobDescription')\n if m.get('TrainingJobName') is not None:\n 
self.training_job_name = m.get('TrainingJobName')\n if m.get('UserVpc') is not None:\n temp_model = CreateTrainingJobRequestUserVpc()\n self.user_vpc = temp_model.from_map(m['UserVpc'])\n if m.get('WorkspaceId') is not None:\n self.workspace_id = m.get('WorkspaceId')\n return self" }, { "identifier": "CreateTrainingJobRequestComputeResource", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestComputeResource(TeaModel):\n def __init__(\n self,\n ecs_count: int = None,\n ecs_spec: str = None,\n instance_count: int = None,\n instance_spec: CreateTrainingJobRequestComputeResourceInstanceSpec = None,\n resource_id: str = None,\n ):\n self.ecs_count = ecs_count\n self.ecs_spec = ecs_spec\n self.instance_count = instance_count\n self.instance_spec = instance_spec\n self.resource_id = resource_id\n\n def validate(self):\n if self.instance_spec:\n self.instance_spec.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.ecs_count is not None:\n result['EcsCount'] = self.ecs_count\n if self.ecs_spec is not None:\n result['EcsSpec'] = self.ecs_spec\n if self.instance_count is not None:\n result['InstanceCount'] = self.instance_count\n if self.instance_spec is not None:\n result['InstanceSpec'] = self.instance_spec.to_map()\n if self.resource_id is not None:\n result['ResourceId'] = self.resource_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('EcsCount') is not None:\n self.ecs_count = m.get('EcsCount')\n if m.get('EcsSpec') is not None:\n self.ecs_spec = m.get('EcsSpec')\n if m.get('InstanceCount') is not None:\n self.instance_count = m.get('InstanceCount')\n if m.get('InstanceSpec') is not None:\n temp_model = CreateTrainingJobRequestComputeResourceInstanceSpec()\n self.instance_spec = temp_model.from_map(m['InstanceSpec'])\n if m.get('ResourceId') is not None:\n self.resource_id = m.get('ResourceId')\n return self" }, { "identifier": "CreateTrainingJobRequestHyperParameters", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestHyperParameters(TeaModel):\n def __init__(\n self,\n name: str = None,\n value: str = None,\n ):\n self.name = name\n self.value = value\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.name is not None:\n result['Name'] = self.name\n if self.value is not None:\n result['Value'] = self.value\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Name') is not None:\n self.name = m.get('Name')\n if m.get('Value') is not None:\n self.value = m.get('Value')\n return self" }, { "identifier": "CreateTrainingJobRequestInputChannels", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestInputChannels(TeaModel):\n def __init__(\n self,\n dataset_id: str = None,\n input_uri: str = None,\n name: str = None,\n ):\n self.dataset_id = dataset_id\n self.input_uri = input_uri\n self.name = name\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.dataset_id is not None:\n result['DatasetId'] = self.dataset_id\n if self.input_uri is not None:\n result['InputUri'] = self.input_uri\n if self.name is not None:\n result['Name'] = self.name\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if 
m.get('DatasetId') is not None:\n self.dataset_id = m.get('DatasetId')\n if m.get('InputUri') is not None:\n self.input_uri = m.get('InputUri')\n if m.get('Name') is not None:\n self.name = m.get('Name')\n return self" }, { "identifier": "CreateTrainingJobRequestLabels", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestLabels(TeaModel):\n def __init__(\n self,\n key: str = None,\n value: str = None,\n ):\n self.key = key\n self.value = value\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.key is not None:\n result['Key'] = self.key\n if self.value is not None:\n result['Value'] = self.value\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Key') is not None:\n self.key = m.get('Key')\n if m.get('Value') is not None:\n self.value = m.get('Value')\n return self" }, { "identifier": "CreateTrainingJobRequestOutputChannels", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestOutputChannels(TeaModel):\n def __init__(\n self,\n dataset_id: str = None,\n name: str = None,\n output_uri: str = None,\n ):\n self.dataset_id = dataset_id\n self.name = name\n self.output_uri = output_uri\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.dataset_id is not None:\n result['DatasetId'] = self.dataset_id\n if self.name is not None:\n result['Name'] = self.name\n if self.output_uri is not None:\n result['OutputUri'] = self.output_uri\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('DatasetId') is not None:\n self.dataset_id = m.get('DatasetId')\n if m.get('Name') is not None:\n self.name = m.get('Name')\n if m.get('OutputUri') is not None:\n self.output_uri = m.get('OutputUri')\n return self" }, { "identifier": "CreateTrainingJobRequestScheduler", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestScheduler(TeaModel):\n def __init__(\n self,\n max_running_time_in_seconds: int = None,\n ):\n self.max_running_time_in_seconds = max_running_time_in_seconds\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.max_running_time_in_seconds is not None:\n result['MaxRunningTimeInSeconds'] = self.max_running_time_in_seconds\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('MaxRunningTimeInSeconds') is not None:\n self.max_running_time_in_seconds = m.get('MaxRunningTimeInSeconds')\n return self" }, { "identifier": "CreateTrainingJobRequestUserVpc", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestUserVpc(TeaModel):\n def __init__(\n self,\n extended_cidrs: List[str] = None,\n security_group_id: str = None,\n switch_id: str = None,\n vpc_id: str = None,\n ):\n self.extended_cidrs = extended_cidrs\n self.security_group_id = security_group_id\n self.switch_id = switch_id\n self.vpc_id = vpc_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.extended_cidrs is not None:\n result['ExtendedCIDRs'] = self.extended_cidrs\n if self.security_group_id is not None:\n result['SecurityGroupId'] = self.security_group_id\n if self.switch_id is 
not None:\n result['SwitchId'] = self.switch_id\n if self.vpc_id is not None:\n result['VpcId'] = self.vpc_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('ExtendedCIDRs') is not None:\n self.extended_cidrs = m.get('ExtendedCIDRs')\n if m.get('SecurityGroupId') is not None:\n self.security_group_id = m.get('SecurityGroupId')\n if m.get('SwitchId') is not None:\n self.switch_id = m.get('SwitchId')\n if m.get('VpcId') is not None:\n self.vpc_id = m.get('VpcId')\n return self" }, { "identifier": "CreateTrainingJobResponseBody", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobResponseBody(TeaModel):\n def __init__(\n self,\n request_id: str = None,\n training_job_id: str = None,\n ):\n self.request_id = request_id\n self.training_job_id = training_job_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.request_id is not None:\n result['RequestId'] = self.request_id\n if self.training_job_id is not None:\n result['TrainingJobId'] = self.training_job_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('RequestId') is not None:\n self.request_id = m.get('RequestId')\n if m.get('TrainingJobId') is not None:\n self.training_job_id = m.get('TrainingJobId')\n return self" }, { "identifier": "GetTrainingJobRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class GetTrainingJobRequest(TeaModel):\n def __init__(\n self,\n token: str = None,\n ):\n self.token = token\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.token is not None:\n result['Token'] = self.token\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Token') is not None:\n self.token = m.get('Token')\n return self" }, { "identifier": "GetTrainingJobResponseBody", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class GetTrainingJobResponseBody(TeaModel):\n def __init__(\n self,\n algorithm_id: str = None,\n algorithm_name: str = None,\n algorithm_provider: str = None,\n algorithm_spec: AlgorithmSpec = None,\n algorithm_version: str = None,\n compute_resource: GetTrainingJobResponseBodyComputeResource = None,\n gmt_create_time: str = None,\n gmt_modified_time: str = None,\n hyper_parameters: List[GetTrainingJobResponseBodyHyperParameters] = None,\n input_channels: List[GetTrainingJobResponseBodyInputChannels] = None,\n instances: List[GetTrainingJobResponseBodyInstances] = None,\n is_temp_algo: bool = None,\n labels: List[GetTrainingJobResponseBodyLabels] = None,\n latest_metrics: List[GetTrainingJobResponseBodyLatestMetrics] = None,\n latest_progress: GetTrainingJobResponseBodyLatestProgress = None,\n output_channels: List[GetTrainingJobResponseBodyOutputChannels] = None,\n reason_code: str = None,\n reason_message: str = None,\n request_id: str = None,\n role_arn: str = None,\n scheduler: GetTrainingJobResponseBodyScheduler = None,\n status: str = None,\n status_transitions: List[GetTrainingJobResponseBodyStatusTransitions] = None,\n training_job_description: str = None,\n training_job_id: str = None,\n training_job_name: str = None,\n training_job_url: str = None,\n user_id: str = None,\n user_vpc: GetTrainingJobResponseBodyUserVpc = None,\n workspace_id: str = None,\n ):\n self.algorithm_id = algorithm_id\n self.algorithm_name = 
algorithm_name\n self.algorithm_provider = algorithm_provider\n self.algorithm_spec = algorithm_spec\n self.algorithm_version = algorithm_version\n self.compute_resource = compute_resource\n self.gmt_create_time = gmt_create_time\n self.gmt_modified_time = gmt_modified_time\n self.hyper_parameters = hyper_parameters\n self.input_channels = input_channels\n self.instances = instances\n self.is_temp_algo = is_temp_algo\n self.labels = labels\n self.latest_metrics = latest_metrics\n self.latest_progress = latest_progress\n self.output_channels = output_channels\n self.reason_code = reason_code\n self.reason_message = reason_message\n self.request_id = request_id\n self.role_arn = role_arn\n self.scheduler = scheduler\n self.status = status\n self.status_transitions = status_transitions\n self.training_job_description = training_job_description\n self.training_job_id = training_job_id\n self.training_job_name = training_job_name\n self.training_job_url = training_job_url\n self.user_id = user_id\n self.user_vpc = user_vpc\n self.workspace_id = workspace_id\n\n def validate(self):\n if self.algorithm_spec:\n self.algorithm_spec.validate()\n if self.compute_resource:\n self.compute_resource.validate()\n if self.hyper_parameters:\n for k in self.hyper_parameters:\n if k:\n k.validate()\n if self.input_channels:\n for k in self.input_channels:\n if k:\n k.validate()\n if self.instances:\n for k in self.instances:\n if k:\n k.validate()\n if self.labels:\n for k in self.labels:\n if k:\n k.validate()\n if self.latest_metrics:\n for k in self.latest_metrics:\n if k:\n k.validate()\n if self.latest_progress:\n self.latest_progress.validate()\n if self.output_channels:\n for k in self.output_channels:\n if k:\n k.validate()\n if self.scheduler:\n self.scheduler.validate()\n if self.status_transitions:\n for k in self.status_transitions:\n if k:\n k.validate()\n if self.user_vpc:\n self.user_vpc.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.algorithm_id is not None:\n result['AlgorithmId'] = self.algorithm_id\n if self.algorithm_name is not None:\n result['AlgorithmName'] = self.algorithm_name\n if self.algorithm_provider is not None:\n result['AlgorithmProvider'] = self.algorithm_provider\n if self.algorithm_spec is not None:\n result['AlgorithmSpec'] = self.algorithm_spec.to_map()\n if self.algorithm_version is not None:\n result['AlgorithmVersion'] = self.algorithm_version\n if self.compute_resource is not None:\n result['ComputeResource'] = self.compute_resource.to_map()\n if self.gmt_create_time is not None:\n result['GmtCreateTime'] = self.gmt_create_time\n if self.gmt_modified_time is not None:\n result['GmtModifiedTime'] = self.gmt_modified_time\n result['HyperParameters'] = []\n if self.hyper_parameters is not None:\n for k in self.hyper_parameters:\n result['HyperParameters'].append(k.to_map() if k else None)\n result['InputChannels'] = []\n if self.input_channels is not None:\n for k in self.input_channels:\n result['InputChannels'].append(k.to_map() if k else None)\n result['Instances'] = []\n if self.instances is not None:\n for k in self.instances:\n result['Instances'].append(k.to_map() if k else None)\n if self.is_temp_algo is not None:\n result['IsTempAlgo'] = self.is_temp_algo\n result['Labels'] = []\n if self.labels is not None:\n for k in self.labels:\n result['Labels'].append(k.to_map() if k else None)\n result['LatestMetrics'] = []\n if self.latest_metrics is not None:\n for k in self.latest_metrics:\n 
result['LatestMetrics'].append(k.to_map() if k else None)\n if self.latest_progress is not None:\n result['LatestProgress'] = self.latest_progress.to_map()\n result['OutputChannels'] = []\n if self.output_channels is not None:\n for k in self.output_channels:\n result['OutputChannels'].append(k.to_map() if k else None)\n if self.reason_code is not None:\n result['ReasonCode'] = self.reason_code\n if self.reason_message is not None:\n result['ReasonMessage'] = self.reason_message\n if self.request_id is not None:\n result['RequestId'] = self.request_id\n if self.role_arn is not None:\n result['RoleArn'] = self.role_arn\n if self.scheduler is not None:\n result['Scheduler'] = self.scheduler.to_map()\n if self.status is not None:\n result['Status'] = self.status\n result['StatusTransitions'] = []\n if self.status_transitions is not None:\n for k in self.status_transitions:\n result['StatusTransitions'].append(k.to_map() if k else None)\n if self.training_job_description is not None:\n result['TrainingJobDescription'] = self.training_job_description\n if self.training_job_id is not None:\n result['TrainingJobId'] = self.training_job_id\n if self.training_job_name is not None:\n result['TrainingJobName'] = self.training_job_name\n if self.training_job_url is not None:\n result['TrainingJobUrl'] = self.training_job_url\n if self.user_id is not None:\n result['UserId'] = self.user_id\n if self.user_vpc is not None:\n result['UserVpc'] = self.user_vpc.to_map()\n if self.workspace_id is not None:\n result['WorkspaceId'] = self.workspace_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('AlgorithmId') is not None:\n self.algorithm_id = m.get('AlgorithmId')\n if m.get('AlgorithmName') is not None:\n self.algorithm_name = m.get('AlgorithmName')\n if m.get('AlgorithmProvider') is not None:\n self.algorithm_provider = m.get('AlgorithmProvider')\n if m.get('AlgorithmSpec') is not None:\n temp_model = AlgorithmSpec()\n self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec'])\n if m.get('AlgorithmVersion') is not None:\n self.algorithm_version = m.get('AlgorithmVersion')\n if m.get('ComputeResource') is not None:\n temp_model = GetTrainingJobResponseBodyComputeResource()\n self.compute_resource = temp_model.from_map(m['ComputeResource'])\n if m.get('GmtCreateTime') is not None:\n self.gmt_create_time = m.get('GmtCreateTime')\n if m.get('GmtModifiedTime') is not None:\n self.gmt_modified_time = m.get('GmtModifiedTime')\n self.hyper_parameters = []\n if m.get('HyperParameters') is not None:\n for k in m.get('HyperParameters'):\n temp_model = GetTrainingJobResponseBodyHyperParameters()\n self.hyper_parameters.append(temp_model.from_map(k))\n self.input_channels = []\n if m.get('InputChannels') is not None:\n for k in m.get('InputChannels'):\n temp_model = GetTrainingJobResponseBodyInputChannels()\n self.input_channels.append(temp_model.from_map(k))\n self.instances = []\n if m.get('Instances') is not None:\n for k in m.get('Instances'):\n temp_model = GetTrainingJobResponseBodyInstances()\n self.instances.append(temp_model.from_map(k))\n if m.get('IsTempAlgo') is not None:\n self.is_temp_algo = m.get('IsTempAlgo')\n self.labels = []\n if m.get('Labels') is not None:\n for k in m.get('Labels'):\n temp_model = GetTrainingJobResponseBodyLabels()\n self.labels.append(temp_model.from_map(k))\n self.latest_metrics = []\n if m.get('LatestMetrics') is not None:\n for k in m.get('LatestMetrics'):\n temp_model = GetTrainingJobResponseBodyLatestMetrics()\n 
self.latest_metrics.append(temp_model.from_map(k))\n if m.get('LatestProgress') is not None:\n temp_model = GetTrainingJobResponseBodyLatestProgress()\n self.latest_progress = temp_model.from_map(m['LatestProgress'])\n self.output_channels = []\n if m.get('OutputChannels') is not None:\n for k in m.get('OutputChannels'):\n temp_model = GetTrainingJobResponseBodyOutputChannels()\n self.output_channels.append(temp_model.from_map(k))\n if m.get('ReasonCode') is not None:\n self.reason_code = m.get('ReasonCode')\n if m.get('ReasonMessage') is not None:\n self.reason_message = m.get('ReasonMessage')\n if m.get('RequestId') is not None:\n self.request_id = m.get('RequestId')\n if m.get('RoleArn') is not None:\n self.role_arn = m.get('RoleArn')\n if m.get('Scheduler') is not None:\n temp_model = GetTrainingJobResponseBodyScheduler()\n self.scheduler = temp_model.from_map(m['Scheduler'])\n if m.get('Status') is not None:\n self.status = m.get('Status')\n self.status_transitions = []\n if m.get('StatusTransitions') is not None:\n for k in m.get('StatusTransitions'):\n temp_model = GetTrainingJobResponseBodyStatusTransitions()\n self.status_transitions.append(temp_model.from_map(k))\n if m.get('TrainingJobDescription') is not None:\n self.training_job_description = m.get('TrainingJobDescription')\n if m.get('TrainingJobId') is not None:\n self.training_job_id = m.get('TrainingJobId')\n if m.get('TrainingJobName') is not None:\n self.training_job_name = m.get('TrainingJobName')\n if m.get('TrainingJobUrl') is not None:\n self.training_job_url = m.get('TrainingJobUrl')\n if m.get('UserId') is not None:\n self.user_id = m.get('UserId')\n if m.get('UserVpc') is not None:\n temp_model = GetTrainingJobResponseBodyUserVpc()\n self.user_vpc = temp_model.from_map(m['UserVpc'])\n if m.get('WorkspaceId') is not None:\n self.workspace_id = m.get('WorkspaceId')\n return self" }, { "identifier": "ListTrainingJobLogsRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class ListTrainingJobLogsRequest(TeaModel):\n def __init__(\n self,\n end_time: str = None,\n page_number: int = None,\n page_size: int = None,\n start_time: str = None,\n token: str = None,\n worker_id: str = None,\n ):\n self.end_time = end_time\n self.page_number = page_number\n self.page_size = page_size\n self.start_time = start_time\n self.token = token\n self.worker_id = worker_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.end_time is not None:\n result['EndTime'] = self.end_time\n if self.page_number is not None:\n result['PageNumber'] = self.page_number\n if self.page_size is not None:\n result['PageSize'] = self.page_size\n if self.start_time is not None:\n result['StartTime'] = self.start_time\n if self.token is not None:\n result['Token'] = self.token\n if self.worker_id is not None:\n result['WorkerId'] = self.worker_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('EndTime') is not None:\n self.end_time = m.get('EndTime')\n if m.get('PageNumber') is not None:\n self.page_number = m.get('PageNumber')\n if m.get('PageSize') is not None:\n self.page_size = m.get('PageSize')\n if m.get('StartTime') is not None:\n self.start_time = m.get('StartTime')\n if m.get('Token') is not None:\n self.token = m.get('Token')\n if m.get('WorkerId') is not None:\n self.worker_id = m.get('WorkerId')\n return self" }, { "identifier": "ListTrainingJobLogsResponseBody", "path": 
"pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class ListTrainingJobLogsResponseBody(TeaModel):\n def __init__(\n self,\n logs: List[str] = None,\n request_id: str = None,\n total_count: str = None,\n ):\n self.logs = logs\n self.request_id = request_id\n self.total_count = total_count\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.logs is not None:\n result['Logs'] = self.logs\n if self.request_id is not None:\n result['RequestId'] = self.request_id\n if self.total_count is not None:\n result['TotalCount'] = self.total_count\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Logs') is not None:\n self.logs = m.get('Logs')\n if m.get('RequestId') is not None:\n self.request_id = m.get('RequestId')\n if m.get('TotalCount') is not None:\n self.total_count = m.get('TotalCount')\n return self" }, { "identifier": "ListTrainingJobsRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class ListTrainingJobsRequest(TeaModel):\n def __init__(\n self,\n algorithm_name: str = None,\n algorithm_provider: str = None,\n end_time: str = None,\n is_temp_algo: bool = None,\n labels: Dict[str, Any] = None,\n order: str = None,\n page_number: int = None,\n page_size: int = None,\n sort_by: str = None,\n start_time: str = None,\n status: str = None,\n training_job_id: str = None,\n training_job_name: str = None,\n workspace_id: str = None,\n ):\n self.algorithm_name = algorithm_name\n self.algorithm_provider = algorithm_provider\n self.end_time = end_time\n self.is_temp_algo = is_temp_algo\n self.labels = labels\n self.order = order\n self.page_number = page_number\n self.page_size = page_size\n self.sort_by = sort_by\n self.start_time = start_time\n self.status = status\n self.training_job_id = training_job_id\n self.training_job_name = training_job_name\n self.workspace_id = workspace_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.algorithm_name is not None:\n result['AlgorithmName'] = self.algorithm_name\n if self.algorithm_provider is not None:\n result['AlgorithmProvider'] = self.algorithm_provider\n if self.end_time is not None:\n result['EndTime'] = self.end_time\n if self.is_temp_algo is not None:\n result['IsTempAlgo'] = self.is_temp_algo\n if self.labels is not None:\n result['Labels'] = self.labels\n if self.order is not None:\n result['Order'] = self.order\n if self.page_number is not None:\n result['PageNumber'] = self.page_number\n if self.page_size is not None:\n result['PageSize'] = self.page_size\n if self.sort_by is not None:\n result['SortBy'] = self.sort_by\n if self.start_time is not None:\n result['StartTime'] = self.start_time\n if self.status is not None:\n result['Status'] = self.status\n if self.training_job_id is not None:\n result['TrainingJobId'] = self.training_job_id\n if self.training_job_name is not None:\n result['TrainingJobName'] = self.training_job_name\n if self.workspace_id is not None:\n result['WorkspaceId'] = self.workspace_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('AlgorithmName') is not None:\n self.algorithm_name = m.get('AlgorithmName')\n if m.get('AlgorithmProvider') is not None:\n self.algorithm_provider = m.get('AlgorithmProvider')\n if m.get('EndTime') is not None:\n self.end_time = m.get('EndTime')\n if m.get('IsTempAlgo') is not 
None:\n self.is_temp_algo = m.get('IsTempAlgo')\n if m.get('Labels') is not None:\n self.labels = m.get('Labels')\n if m.get('Order') is not None:\n self.order = m.get('Order')\n if m.get('PageNumber') is not None:\n self.page_number = m.get('PageNumber')\n if m.get('PageSize') is not None:\n self.page_size = m.get('PageSize')\n if m.get('SortBy') is not None:\n self.sort_by = m.get('SortBy')\n if m.get('StartTime') is not None:\n self.start_time = m.get('StartTime')\n if m.get('Status') is not None:\n self.status = m.get('Status')\n if m.get('TrainingJobId') is not None:\n self.training_job_id = m.get('TrainingJobId')\n if m.get('TrainingJobName') is not None:\n self.training_job_name = m.get('TrainingJobName')\n if m.get('WorkspaceId') is not None:\n self.workspace_id = m.get('WorkspaceId')\n return self" } ]
from typing import Any, Dict, List, Optional from ..api.base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI from ..libs.alibabacloud_paistudio20220112.models import ( AlgorithmSpec, CreateTrainingJobRequest, CreateTrainingJobRequestComputeResource, CreateTrainingJobRequestHyperParameters, CreateTrainingJobRequestInputChannels, CreateTrainingJobRequestLabels, CreateTrainingJobRequestOutputChannels, CreateTrainingJobRequestScheduler, CreateTrainingJobRequestUserVpc, CreateTrainingJobResponseBody, GetTrainingJobRequest, GetTrainingJobResponseBody, ListTrainingJobLogsRequest, ListTrainingJobLogsResponseBody, ListTrainingJobsRequest, )
12,362
# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class TrainingJobAPI(WorkspaceScopedResourceAPI): BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO _list_method = "list_training_jobs_with_options" _create_method = "create_training_job_with_options" _get_method = "get_training_job_with_options" _list_logs_method = "list_training_job_logs_with_options" # _list_method = "list_training_jobs_with_options" def list( self, page_size: int = 20, page_number: int = 1, order: str = None, sort_by: str = None, status: str = None, training_job_name: str = None, ) -> PaginatedResult: request = ListTrainingJobsRequest( page_size=page_size, page_number=page_number, status=status, training_job_name=training_job_name, order=order, sort_by=sort_by, ) res = self._do_request( method_=self._list_method, tmp_req=request, ) return self.make_paginated_result(res) def get_api_object_by_resource_id(self, resource_id) -> Dict[str, Any]: res: GetTrainingJobResponseBody = self._do_request( method_=self._get_method, training_job_id=resource_id, request=GetTrainingJobRequest(), ) return res.to_map() def get(self, training_job_id) -> Dict[str, Any]: return self.get_api_object_by_resource_id(training_job_id) def create( self, instance_type, instance_count, job_name, hyperparameters: Optional[Dict[str, Any]] = None, input_channels: Optional[List[Dict[str, Any]]] = None, output_channels: Optional[List[Dict[str, Any]]] = None, labels: Optional[Dict[str, str]] = None, max_running_in_seconds: Optional[int] = None, description: Optional[str] = None, algorithm_name: Optional[str] = None, algorithm_version: Optional[str] = None, algorithm_provider: Optional[str] = None, algorithm_spec: Optional[Dict[str, Any]] = None, user_vpc_config: Optional[Dict[str, Any]] = None, ) -> str: """Create a TrainingJob.""" if algorithm_spec and ( algorithm_name or algorithm_version or algorithm_provider ): raise ValueError( "Please provide algorithm_spec or a tuple of (algorithm_name, " "algorithm_version or algorithm_provider), but not both." ) if algorithm_spec: algo_spec = AlgorithmSpec().from_map(algorithm_spec) else: algo_spec = None input_channels = [ CreateTrainingJobRequestInputChannels().from_map(ch) for ch in input_channels ] output_channels = [ CreateTrainingJobRequestOutputChannels().from_map(ch) for ch in output_channels ] compute_resource = CreateTrainingJobRequestComputeResource( ecs_count=instance_count, ecs_spec=instance_type, ) hyper_parameters = [ CreateTrainingJobRequestHyperParameters( name=name, value=str(value), ) for name, value in hyperparameters.items() ] labels = ( [ CreateTrainingJobRequestLabels(key=key, value=value) for key, value in labels.items() ] if labels else None ) scheduler = CreateTrainingJobRequestScheduler( max_running_time_in_seconds=max_running_in_seconds )
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class TrainingJobAPI(WorkspaceScopedResourceAPI): BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO _list_method = "list_training_jobs_with_options" _create_method = "create_training_job_with_options" _get_method = "get_training_job_with_options" _list_logs_method = "list_training_job_logs_with_options" # _list_method = "list_training_jobs_with_options" def list( self, page_size: int = 20, page_number: int = 1, order: str = None, sort_by: str = None, status: str = None, training_job_name: str = None, ) -> PaginatedResult: request = ListTrainingJobsRequest( page_size=page_size, page_number=page_number, status=status, training_job_name=training_job_name, order=order, sort_by=sort_by, ) res = self._do_request( method_=self._list_method, tmp_req=request, ) return self.make_paginated_result(res) def get_api_object_by_resource_id(self, resource_id) -> Dict[str, Any]: res: GetTrainingJobResponseBody = self._do_request( method_=self._get_method, training_job_id=resource_id, request=GetTrainingJobRequest(), ) return res.to_map() def get(self, training_job_id) -> Dict[str, Any]: return self.get_api_object_by_resource_id(training_job_id) def create( self, instance_type, instance_count, job_name, hyperparameters: Optional[Dict[str, Any]] = None, input_channels: Optional[List[Dict[str, Any]]] = None, output_channels: Optional[List[Dict[str, Any]]] = None, labels: Optional[Dict[str, str]] = None, max_running_in_seconds: Optional[int] = None, description: Optional[str] = None, algorithm_name: Optional[str] = None, algorithm_version: Optional[str] = None, algorithm_provider: Optional[str] = None, algorithm_spec: Optional[Dict[str, Any]] = None, user_vpc_config: Optional[Dict[str, Any]] = None, ) -> str: """Create a TrainingJob.""" if algorithm_spec and ( algorithm_name or algorithm_version or algorithm_provider ): raise ValueError( "Please provide algorithm_spec or a tuple of (algorithm_name, " "algorithm_version or algorithm_provider), but not both." ) if algorithm_spec: algo_spec = AlgorithmSpec().from_map(algorithm_spec) else: algo_spec = None input_channels = [ CreateTrainingJobRequestInputChannels().from_map(ch) for ch in input_channels ] output_channels = [ CreateTrainingJobRequestOutputChannels().from_map(ch) for ch in output_channels ] compute_resource = CreateTrainingJobRequestComputeResource( ecs_count=instance_count, ecs_spec=instance_type, ) hyper_parameters = [ CreateTrainingJobRequestHyperParameters( name=name, value=str(value), ) for name, value in hyperparameters.items() ] labels = ( [ CreateTrainingJobRequestLabels(key=key, value=value) for key, value in labels.items() ] if labels else None ) scheduler = CreateTrainingJobRequestScheduler( max_running_time_in_seconds=max_running_in_seconds )
next_line: request = CreateTrainingJobRequest(
gold_snippet_index: 4
created_at: 2023-12-01 01:40:12+00:00
level: 16k
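Each record pairs a long repository-level prompt with a single gold continuation stored in next_line (here "request = CreateTrainingJobRequest("). Below is a minimal sketch of how such a prediction could be scored against the gold line. It assumes exact match after whitespace normalization as the metric (the dump itself prescribes no metric), and normalize / exact_match_rate are hypothetical helper names.

import re
from typing import Iterable

def normalize(line: str) -> str:
    # Collapse runs of whitespace so purely cosmetic spacing differences are not counted as errors.
    return re.sub(r"\s+", " ", line).strip()

def exact_match_rate(predictions: Iterable[str], references: Iterable[str]) -> float:
    # Fraction of predictions that reproduce the gold next line exactly (after normalization).
    pairs = list(zip(predictions, references))
    if not pairs:
        return 0.0
    return sum(normalize(p) == normalize(r) for p, r in pairs) / len(pairs)

if __name__ == "__main__":
    gold = ["request = CreateTrainingJobRequest("]    # next_line of the record above
    pred = ["request  =  CreateTrainingJobRequest("]  # differs only in internal spacing
    print(exact_match_rate(pred, gold))               # 1.0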
repo_name: JunMa11/UHNSeg-Quiz
file_path: nnunetv2/inference/predict_from_raw_data.py
[ { "identifier": "default_num_processes", "path": "nnunetv2/configuration.py", "snippet": "ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low" }, { "identifier": "PreprocessAdapterFromNpy", "path": "nnunetv2/inference/data_iterators.py", "snippet": "class PreprocessAdapterFromNpy(DataLoader):\n def __init__(self, list_of_images: List[np.ndarray],\n list_of_segs_from_prev_stage: Union[List[np.ndarray], None],\n list_of_image_properties: List[dict],\n truncated_ofnames: Union[List[str], None],\n plans_manager: PlansManager, dataset_json: dict, configuration_manager: ConfigurationManager,\n num_threads_in_multithreaded: int = 1, verbose: bool = False):\n preprocessor = configuration_manager.preprocessor_class(verbose=verbose)\n self.preprocessor, self.plans_manager, self.configuration_manager, self.dataset_json, self.truncated_ofnames = \\\n preprocessor, plans_manager, configuration_manager, dataset_json, truncated_ofnames\n\n self.label_manager = plans_manager.get_label_manager(dataset_json)\n\n if list_of_segs_from_prev_stage is None:\n list_of_segs_from_prev_stage = [None] * len(list_of_images)\n if truncated_ofnames is None:\n truncated_ofnames = [None] * len(list_of_images)\n\n super().__init__(\n list(zip(list_of_images, list_of_segs_from_prev_stage, list_of_image_properties, truncated_ofnames)),\n 1, num_threads_in_multithreaded,\n seed_for_shuffle=1, return_incomplete=True,\n shuffle=False, infinite=False, sampling_probabilities=None)\n\n self.indices = list(range(len(list_of_images)))\n\n def generate_train_batch(self):\n idx = self.get_indices()[0]\n image = self._data[idx][0]\n seg_prev_stage = self._data[idx][1]\n props = self._data[idx][2]\n ofname = self._data[idx][3]\n # if we have a segmentation from the previous stage we have to process it together with the images so that we\n # can crop it appropriately (if needed). 
Otherwise it would just be resized to the shape of the data after\n # preprocessing and then there might be misalignments\n data, seg = self.preprocessor.run_case_npy(image, seg_prev_stage, props,\n self.plans_manager,\n self.configuration_manager,\n self.dataset_json)\n if seg_prev_stage is not None:\n seg_onehot = convert_labelmap_to_one_hot(seg[0], self.label_manager.foreground_labels, data.dtype)\n data = np.vstack((data, seg_onehot))\n\n data = torch.from_numpy(data)\n\n return {'data': data, 'data_properties': props, 'ofile': ofname}" }, { "identifier": "preprocessing_iterator_fromfiles", "path": "nnunetv2/inference/data_iterators.py", "snippet": "def preprocessing_iterator_fromfiles(list_of_lists: List[List[str]],\n list_of_segs_from_prev_stage_files: Union[None, List[str]],\n output_filenames_truncated: Union[None, List[str]],\n plans_manager: PlansManager,\n dataset_json: dict,\n configuration_manager: ConfigurationManager,\n num_processes: int,\n pin_memory: bool = False,\n verbose: bool = False):\n context = multiprocessing.get_context('spawn')\n manager = Manager()\n num_processes = min(len(list_of_lists), num_processes)\n assert num_processes >= 1\n processes = []\n done_events = []\n target_queues = []\n abort_event = manager.Event()\n for i in range(num_processes):\n event = manager.Event()\n queue = Manager().Queue(maxsize=1)\n pr = context.Process(target=preprocess_fromfiles_save_to_queue,\n args=(\n list_of_lists[i::num_processes],\n list_of_segs_from_prev_stage_files[\n i::num_processes] if list_of_segs_from_prev_stage_files is not None else None,\n output_filenames_truncated[\n i::num_processes] if output_filenames_truncated is not None else None,\n plans_manager,\n dataset_json,\n configuration_manager,\n queue,\n event,\n abort_event,\n verbose\n ), daemon=True)\n pr.start()\n target_queues.append(queue)\n done_events.append(event)\n processes.append(pr)\n\n worker_ctr = 0\n while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()):\n if not target_queues[worker_ctr].empty():\n item = target_queues[worker_ctr].get()\n worker_ctr = (worker_ctr + 1) % num_processes\n else:\n all_ok = all(\n [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set()\n if not all_ok:\n raise RuntimeError('Background workers died. Look for the error message further up! If there is '\n 'none then your RAM was full and the worker was killed by the OS. 
Use fewer '\n 'workers or get more RAM in that case!')\n sleep(0.01)\n continue\n if pin_memory:\n [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]\n yield item\n [p.join() for p in processes]" }, { "identifier": "preprocessing_iterator_fromnpy", "path": "nnunetv2/inference/data_iterators.py", "snippet": "def preprocessing_iterator_fromnpy(list_of_images: List[np.ndarray],\n list_of_segs_from_prev_stage: Union[List[np.ndarray], None],\n list_of_image_properties: List[dict],\n truncated_ofnames: Union[List[str], None],\n plans_manager: PlansManager,\n dataset_json: dict,\n configuration_manager: ConfigurationManager,\n num_processes: int,\n pin_memory: bool = False,\n verbose: bool = False):\n context = multiprocessing.get_context('spawn')\n manager = Manager()\n num_processes = min(len(list_of_images), num_processes)\n assert num_processes >= 1\n target_queues = []\n processes = []\n done_events = []\n abort_event = manager.Event()\n for i in range(num_processes):\n event = manager.Event()\n queue = manager.Queue(maxsize=1)\n pr = context.Process(target=preprocess_fromnpy_save_to_queue,\n args=(\n list_of_images[i::num_processes],\n list_of_segs_from_prev_stage[\n i::num_processes] if list_of_segs_from_prev_stage is not None else None,\n list_of_image_properties[i::num_processes],\n truncated_ofnames[i::num_processes] if truncated_ofnames is not None else None,\n plans_manager,\n dataset_json,\n configuration_manager,\n queue,\n event,\n abort_event,\n verbose\n ), daemon=True)\n pr.start()\n done_events.append(event)\n processes.append(pr)\n target_queues.append(queue)\n\n worker_ctr = 0\n while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()):\n if not target_queues[worker_ctr].empty():\n item = target_queues[worker_ctr].get()\n worker_ctr = (worker_ctr + 1) % num_processes\n else:\n all_ok = all(\n [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set()\n if not all_ok:\n raise RuntimeError('Background workers died. Look for the error message further up! If there is '\n 'none then your RAM was full and the worker was killed by the OS. 
Use fewer '\n 'workers or get more RAM in that case!')\n sleep(0.01)\n continue\n if pin_memory:\n [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]\n yield item\n [p.join() for p in processes]" }, { "identifier": "export_prediction_from_logits", "path": "nnunetv2/inference/export_prediction.py", "snippet": "def export_prediction_from_logits(predicted_array_or_file: Union[np.ndarray, torch.Tensor], properties_dict: dict,\n configuration_manager: ConfigurationManager,\n plans_manager: PlansManager,\n dataset_json_dict_or_file: Union[dict, str], output_file_truncated: str,\n save_probabilities: bool = False):\n # if isinstance(predicted_array_or_file, str):\n # tmp = deepcopy(predicted_array_or_file)\n # if predicted_array_or_file.endswith('.npy'):\n # predicted_array_or_file = np.load(predicted_array_or_file)\n # elif predicted_array_or_file.endswith('.npz'):\n # predicted_array_or_file = np.load(predicted_array_or_file)['softmax']\n # os.remove(tmp)\n\n if isinstance(dataset_json_dict_or_file, str):\n dataset_json_dict_or_file = load_json(dataset_json_dict_or_file)\n\n label_manager = plans_manager.get_label_manager(dataset_json_dict_or_file)\n ret = convert_predicted_logits_to_segmentation_with_correct_shape(\n predicted_array_or_file, plans_manager, configuration_manager, label_manager, properties_dict,\n return_probabilities=save_probabilities\n )\n del predicted_array_or_file\n\n # save\n if save_probabilities:\n segmentation_final, probabilities_final = ret\n np.savez_compressed(output_file_truncated + '.npz', probabilities=probabilities_final)\n save_pickle(properties_dict, output_file_truncated + '.pkl')\n del probabilities_final, ret\n else:\n segmentation_final = ret\n del ret\n\n rw = plans_manager.image_reader_writer_class()\n rw.write_seg(segmentation_final, output_file_truncated + dataset_json_dict_or_file['file_ending'],\n properties_dict)" }, { "identifier": "convert_predicted_logits_to_segmentation_with_correct_shape", "path": "nnunetv2/inference/export_prediction.py", "snippet": "def convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits: Union[torch.Tensor, np.ndarray],\n plans_manager: PlansManager,\n configuration_manager: ConfigurationManager,\n label_manager: LabelManager,\n properties_dict: dict,\n return_probabilities: bool = False,\n num_threads_torch: int = default_num_processes):\n old_threads = torch.get_num_threads()\n torch.set_num_threads(num_threads_torch)\n\n # resample to original shape\n current_spacing = configuration_manager.spacing if \\\n len(configuration_manager.spacing) == \\\n len(properties_dict['shape_after_cropping_and_before_resampling']) else \\\n [properties_dict['spacing'][0], *configuration_manager.spacing]\n predicted_logits = configuration_manager.resampling_fn_probabilities(predicted_logits,\n properties_dict['shape_after_cropping_and_before_resampling'],\n current_spacing,\n properties_dict['spacing'])\n # return value of resampling_fn_probabilities can be ndarray or Tensor but that does not matter because\n # apply_inference_nonlin will convert to torch\n predicted_probabilities = label_manager.apply_inference_nonlin(predicted_logits)\n del predicted_logits\n segmentation = label_manager.convert_probabilities_to_segmentation(predicted_probabilities)\n\n # segmentation may be torch.Tensor but we continue with numpy\n if isinstance(segmentation, torch.Tensor):\n segmentation = segmentation.cpu().numpy()\n\n # put segmentation in bbox (revert cropping)\n segmentation_reverted_cropping = 
np.zeros(properties_dict['shape_before_cropping'],\n dtype=np.uint8 if len(label_manager.foreground_labels) < 255 else np.uint16)\n slicer = bounding_box_to_slice(properties_dict['bbox_used_for_cropping'])\n segmentation_reverted_cropping[slicer] = segmentation\n del segmentation\n\n # revert transpose\n segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(plans_manager.transpose_backward)\n if return_probabilities:\n # revert cropping\n predicted_probabilities = label_manager.revert_cropping_on_probabilities(predicted_probabilities,\n properties_dict[\n 'bbox_used_for_cropping'],\n properties_dict[\n 'shape_before_cropping'])\n predicted_probabilities = predicted_probabilities.cpu().numpy()\n # revert transpose\n predicted_probabilities = predicted_probabilities.transpose([0] + [i + 1 for i in\n plans_manager.transpose_backward])\n torch.set_num_threads(old_threads)\n return segmentation_reverted_cropping, predicted_probabilities\n else:\n torch.set_num_threads(old_threads)\n return segmentation_reverted_cropping" }, { "identifier": "compute_gaussian", "path": "nnunetv2/inference/sliding_window_prediction.py", "snippet": "@lru_cache(maxsize=2)\ndef compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma_scale: float = 1. / 8,\n value_scaling_factor: float = 1, dtype=torch.float16, device=torch.device('cuda', 0)) \\\n -> torch.Tensor:\n tmp = np.zeros(tile_size)\n center_coords = [i // 2 for i in tile_size]\n sigmas = [i * sigma_scale for i in tile_size]\n tmp[tuple(center_coords)] = 1\n gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map)\n\n gaussian_importance_map = gaussian_importance_map / torch.max(gaussian_importance_map) * value_scaling_factor\n gaussian_importance_map = gaussian_importance_map.type(dtype).to(device)\n\n # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n gaussian_importance_map[gaussian_importance_map == 0] = torch.min(\n gaussian_importance_map[gaussian_importance_map != 0])\n\n return gaussian_importance_map" }, { "identifier": "compute_steps_for_sliding_window", "path": "nnunetv2/inference/sliding_window_prediction.py", "snippet": "def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_size: Tuple[int, ...], tile_step_size: float) -> \\\n List[List[int]]:\n assert [i >= j for i, j in zip(image_size, tile_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < tile_step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. 
For example if we have image size of\n # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46\n target_step_sizes_in_voxels = [i * tile_step_size for i in tile_size]\n\n num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, tile_size)]\n\n steps = []\n for dim in range(len(tile_size)):\n # the highest step value for this dimension is\n max_step_value = image_size[dim] - tile_size[dim]\n if num_steps[dim] > 1:\n actual_step_size = max_step_value / (num_steps[dim] - 1)\n else:\n actual_step_size = 99999999999 # does not matter because there is only one step at 0\n\n steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]\n\n steps.append(steps_here)\n\n return steps" }, { "identifier": "get_output_folder", "path": "nnunetv2/utilities/file_path_utilities.py", "snippet": "def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer',\n plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres',\n fold: Union[str, int] = None) -> str:\n tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id),\n convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration))\n if fold is not None:\n tmp = join(tmp, f'fold_{fold}')\n return tmp" }, { "identifier": "check_workers_alive_and_busy", "path": "nnunetv2/utilities/file_path_utilities.py", "snippet": "def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0):\n \"\"\"\n\n returns True if the number of results that are not ready is greater than the number of available workers + allowed_num_queued\n \"\"\"\n alive = [i.is_alive() for i in worker_list]\n if not all(alive):\n raise RuntimeError('Some background workers are no longer alive')\n\n not_ready = [not i.ready() for i in results_list]\n if sum(not_ready) >= (len(export_pool._pool) + allowed_num_queued):\n return True\n return False" }, { "identifier": "recursive_find_python_class", "path": "nnunetv2/utilities/find_class_by_name.py", "snippet": "def recursive_find_python_class(folder: str, class_name: str, current_module: str):\n tr = None\n for importer, modname, ispkg in pkgutil.iter_modules([folder]):\n # print(modname, ispkg)\n if not ispkg:\n m = importlib.import_module(current_module + \".\" + modname)\n if hasattr(m, class_name):\n tr = getattr(m, class_name)\n break\n\n if tr is None:\n for importer, modname, ispkg in pkgutil.iter_modules([folder]):\n if ispkg:\n next_current_module = current_module + \".\" + modname\n tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module)\n if tr is not None:\n break\n return tr" }, { "identifier": "empty_cache", "path": "nnunetv2/utilities/helpers.py", "snippet": "def empty_cache(device: torch.device):\n if device.type == 'cuda':\n torch.cuda.empty_cache()\n elif device.type == 'mps':\n from torch import mps\n mps.empty_cache()\n else:\n pass" }, { "identifier": "dummy_context", "path": "nnunetv2/utilities/helpers.py", "snippet": "class dummy_context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "recursive_fix_for_json_export", "path": "nnunetv2/utilities/json_export.py", "snippet": "def recursive_fix_for_json_export(my_dict: dict):\n # json is stupid. 'cannot serialize object of type bool_/int64/float64'. 
Come on bro.\n keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys....\n for k in keys:\n if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):\n tmp = my_dict[k]\n del my_dict[k]\n my_dict[int(k)] = tmp\n del tmp\n k = int(k)\n\n if isinstance(my_dict[k], dict):\n recursive_fix_for_json_export(my_dict[k])\n elif isinstance(my_dict[k], np.ndarray):\n assert my_dict[k].ndim == 1, 'only 1d arrays are supported'\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)\n elif isinstance(my_dict[k], (np.bool_,)):\n my_dict[k] = bool(my_dict[k])\n elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):\n my_dict[k] = int(my_dict[k])\n elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):\n my_dict[k] = float(my_dict[k])\n elif isinstance(my_dict[k], list):\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))\n elif isinstance(my_dict[k], tuple):\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)\n elif isinstance(my_dict[k], torch.device):\n my_dict[k] = str(my_dict[k])\n else:\n pass # pray it can be serialized" }, { "identifier": "determine_num_input_channels", "path": "nnunetv2/utilities/label_handling/label_handling.py", "snippet": "def determine_num_input_channels(plans_manager: PlansManager,\n configuration_or_config_manager: Union[str, ConfigurationManager],\n dataset_json: dict) -> int:\n if isinstance(configuration_or_config_manager, str):\n config_manager = plans_manager.get_configuration(configuration_or_config_manager)\n else:\n config_manager = configuration_or_config_manager\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])\n\n # cascade has different number of input channels\n if config_manager.previous_stage_name is not None:\n num_label_inputs = len(label_manager.foreground_labels)\n num_input_channels = num_modalities + num_label_inputs\n else:\n num_input_channels = num_modalities\n return num_input_channels" }, { "identifier": "PlansManager", "path": "nnunetv2/utilities/plans_handling/plans_handler.py", "snippet": "class PlansManager(object):\n def __init__(self, plans_file_or_dict: Union[str, dict]):\n \"\"\"\n Why do we need this?\n 1) resolve inheritance in configurations\n 2) expose otherwise annoying stuff like getting the label manager or IO class from a string\n 3) clearly expose the things that are in the plans instead of hiding them in a dict\n 4) cache shit\n\n This class does not prevent you from going wild. You can still use the plans directly if you prefer\n (PlansHandler.plans['key'])\n \"\"\"\n self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)\n\n def __repr__(self):\n return self.plans.__repr__()\n\n def _internal_resolve_configuration_inheritance(self, configuration_name: str,\n visited: Tuple[str, ...] = None) -> dict:\n if configuration_name not in self.plans['configurations'].keys():\n raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. 
Valid '\n f'configuration names are {list(self.plans[\"configurations\"].keys())}.')\n configuration = deepcopy(self.plans['configurations'][configuration_name])\n if 'inherits_from' in configuration:\n parent_config_name = configuration['inherits_from']\n\n if visited is None:\n visited = (configuration_name,)\n else:\n if parent_config_name in visited:\n raise RuntimeError(f\"Circular dependency detected. The following configurations were visited \"\n f\"while solving inheritance (in that order!): {visited}. \"\n f\"Current configuration: {configuration_name}. Its parent configuration \"\n f\"is {parent_config_name}.\")\n visited = (*visited, configuration_name)\n\n base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited)\n base_config.update(configuration)\n configuration = base_config\n return configuration\n\n @lru_cache(maxsize=10)\n def get_configuration(self, configuration_name: str):\n if configuration_name not in self.plans['configurations'].keys():\n raise RuntimeError(f\"Requested configuration {configuration_name} not found in plans. \"\n f\"Available configurations: {list(self.plans['configurations'].keys())}\")\n\n configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)\n return ConfigurationManager(configuration_dict)\n\n @property\n def dataset_name(self) -> str:\n return self.plans['dataset_name']\n\n @property\n def plans_name(self) -> str:\n return self.plans['plans_name']\n\n @property\n def original_median_spacing_after_transp(self) -> List[float]:\n return self.plans['original_median_spacing_after_transp']\n\n @property\n def original_median_shape_after_transp(self) -> List[float]:\n return self.plans['original_median_shape_after_transp']\n\n @property\n @lru_cache(maxsize=1)\n def image_reader_writer_class(self) -> Type[BaseReaderWriter]:\n return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])\n\n @property\n def transpose_forward(self) -> List[int]:\n return self.plans['transpose_forward']\n\n @property\n def transpose_backward(self) -> List[int]:\n return self.plans['transpose_backward']\n\n @property\n def available_configurations(self) -> List[str]:\n return list(self.plans['configurations'].keys())\n\n @property\n @lru_cache(maxsize=1)\n def experiment_planner_class(self) -> Type[ExperimentPlanner]:\n planner_name = self.experiment_planner_name\n experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], \"experiment_planning\"),\n planner_name,\n current_module=\"nnunetv2.experiment_planning\")\n return experiment_planner\n\n @property\n def experiment_planner_name(self) -> str:\n return self.plans['experiment_planner_used']\n\n @property\n @lru_cache(maxsize=1)\n def label_manager_class(self) -> Type[LabelManager]:\n return get_labelmanager_class_from_plans(self.plans)\n\n def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:\n return self.label_manager_class(label_dict=dataset_json['labels'],\n regions_class_order=dataset_json.get('regions_class_order'),\n **kwargs)\n\n @property\n def foreground_intensity_properties_per_channel(self) -> dict:\n if 'foreground_intensity_properties_per_channel' not in self.plans.keys():\n if 'foreground_intensity_properties_by_modality' in self.plans.keys():\n return self.plans['foreground_intensity_properties_by_modality']\n return self.plans['foreground_intensity_properties_per_channel']" }, { "identifier": "ConfigurationManager", "path": "nnunetv2/utilities/plans_handling/plans_handler.py", 
"snippet": "class ConfigurationManager(object):\n def __init__(self, configuration_dict: dict):\n self.configuration = configuration_dict\n\n def __repr__(self):\n return self.configuration.__repr__()\n\n @property\n def data_identifier(self) -> str:\n return self.configuration['data_identifier']\n\n @property\n def preprocessor_name(self) -> str:\n return self.configuration['preprocessor_name']\n\n @property\n @lru_cache(maxsize=1)\n def preprocessor_class(self) -> Type[DefaultPreprocessor]:\n preprocessor_class = recursive_find_python_class(join(nnunetv2.__path__[0], \"preprocessing\"),\n self.preprocessor_name,\n current_module=\"nnunetv2.preprocessing\")\n return preprocessor_class\n\n @property\n def batch_size(self) -> int:\n return self.configuration['batch_size']\n\n @property\n def patch_size(self) -> List[int]:\n return self.configuration['patch_size']\n\n @property\n def median_image_size_in_voxels(self) -> List[int]:\n return self.configuration['median_image_size_in_voxels']\n\n @property\n def spacing(self) -> List[float]:\n return self.configuration['spacing']\n\n @property\n def normalization_schemes(self) -> List[str]:\n return self.configuration['normalization_schemes']\n\n @property\n def use_mask_for_norm(self) -> List[bool]:\n return self.configuration['use_mask_for_norm']\n\n @property\n def UNet_class_name(self) -> str:\n return self.configuration['UNet_class_name']\n\n @property\n @lru_cache(maxsize=1)\n def UNet_class(self) -> Type[nn.Module]:\n unet_class = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], \"architectures\"),\n self.UNet_class_name,\n current_module=\"dynamic_network_architectures.architectures\")\n if unet_class is None:\n raise RuntimeError('The network architecture specified by the plans file '\n 'is non-standard (maybe your own?). 
Fix this by not using '\n 'ConfigurationManager.UNet_class to instantiate '\n 'it (probably just overwrite build_network_architecture of your trainer.')\n return unet_class\n\n @property\n def UNet_base_num_features(self) -> int:\n return self.configuration['UNet_base_num_features']\n\n @property\n def n_conv_per_stage_encoder(self) -> List[int]:\n return self.configuration['n_conv_per_stage_encoder']\n\n @property\n def n_conv_per_stage_decoder(self) -> List[int]:\n return self.configuration['n_conv_per_stage_decoder']\n\n @property\n def num_pool_per_axis(self) -> List[int]:\n return self.configuration['num_pool_per_axis']\n\n @property\n def pool_op_kernel_sizes(self) -> List[List[int]]:\n return self.configuration['pool_op_kernel_sizes']\n\n @property\n def conv_kernel_sizes(self) -> List[List[int]]:\n return self.configuration['conv_kernel_sizes']\n\n @property\n def unet_max_num_features(self) -> int:\n return self.configuration['unet_max_num_features']\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_data(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data'])\n fn = partial(fn, **self.configuration['resampling_fn_data_kwargs'])\n return fn\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_probabilities(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities'])\n fn = partial(fn, **self.configuration['resampling_fn_probabilities_kwargs'])\n return fn\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_seg(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg'])\n fn = partial(fn, **self.configuration['resampling_fn_seg_kwargs'])\n return fn\n\n @property\n def batch_dice(self) -> bool:\n return self.configuration['batch_dice']\n\n @property\n def next_stage_names(self) -> Union[List[str], None]:\n ret = self.configuration.get('next_stage')\n if ret is not None:\n if isinstance(ret, str):\n ret = [ret]\n return ret\n\n @property\n def previous_stage_name(self) -> Union[str, None]:\n return self.configuration.get('previous_stage')" }, { "identifier": "create_lists_from_splitted_dataset_folder", "path": "nnunetv2/utilities/utils.py", "snippet": "def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[\n List[str]]:\n \"\"\"\n does not rely on dataset.json\n \"\"\"\n if identifiers is None:\n identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending)\n files = subfiles(folder, suffix=file_ending, join=False, sort=True)\n list_of_lists = []\n for f in identifiers:\n p = re.compile(re.escape(f) + r\"_\\d\\d\\d\\d\" + re.escape(file_ending))\n list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)])\n return list_of_lists" } ]
import_statement:
import inspect
import multiprocessing
import os
import traceback
import numpy as np
import torch
import nnunetv2
import argparse
import multiprocessing
import argparse
import multiprocessing
from copy import deepcopy
from time import sleep
from typing import Tuple, Union, List, Optional
from acvl_utils.cropping_and_padding.padding import pad_nd_image
from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \
    save_json
from torch import nn
from torch._dynamo import OptimizedModule
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from nnunetv2.configuration import default_num_processes
from nnunetv2.inference.data_iterators import PreprocessAdapterFromNpy, preprocessing_iterator_fromfiles, \
    preprocessing_iterator_fromnpy
from nnunetv2.inference.export_prediction import export_prediction_from_logits, \
    convert_predicted_logits_to_segmentation_with_correct_shape
from nnunetv2.inference.sliding_window_prediction import compute_gaussian, \
    compute_steps_for_sliding_window
from nnunetv2.utilities.file_path_utilities import get_output_folder, check_workers_alive_and_busy
from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
from nnunetv2.utilities.helpers import empty_cache, dummy_context
from nnunetv2.utilities.json_export import recursive_fix_for_json_export
from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
from nnunetv2.paths import nnUNet_results, nnUNet_raw
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
token_num: 10,968
if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files = [seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that its clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images))
class nnUNetPredictor(object): def __init__(self, tile_step_size: float = 0.5, use_gaussian: bool = True, use_mirroring: bool = True, perform_everything_on_gpu: bool = True, device: torch.device = torch.device('cuda'), verbose: bool = False, verbose_preprocessing: bool = False, allow_tqdm: bool = True): self.verbose = verbose self.verbose_preprocessing = verbose_preprocessing self.allow_tqdm = allow_tqdm self.plans_manager, self.configuration_manager, self.list_of_parameters, self.network, self.dataset_json, \ self.trainer_name, self.allowed_mirroring_axes, self.label_manager = None, None, None, None, None, None, None, None self.tile_step_size = tile_step_size self.use_gaussian = use_gaussian self.use_mirroring = use_mirroring if device.type == 'cuda': # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES! # why would I ever want to do that. Stupid dobby. This kills DDP inference... pass if device.type != 'cuda': print(f'perform_everything_on_gpu=True is only supported for cuda devices! Setting this to False') perform_everything_on_gpu = False self.device = device self.perform_everything_on_gpu = perform_everything_on_gpu def initialize_from_trained_model_folder(self, model_training_output_dir: str, use_folds: Union[Tuple[Union[int, str]], None], checkpoint_name: str = 'checkpoint_final.pth'): """ This is used when making predictions with a trained model """ if use_folds is None: use_folds = nnUNetPredictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) dataset_json = load_json(join(model_training_output_dir, 'dataset.json')) plans = load_json(join(model_training_output_dir, 'plans.json')) plans_manager = PlansManager(plans) if isinstance(use_folds, str): use_folds = [use_folds] parameters = [] for i, f in enumerate(use_folds): f = int(f) if f != 'all' else f checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name), map_location=torch.device('cpu')) if i == 0: trainer_name = checkpoint['trainer_name'] configuration_name = checkpoint['init_args']['configuration'] inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \ 'inference_allowed_mirroring_axes' in checkpoint.keys() else None parameters.append(checkpoint['network_weights']) configuration_manager = plans_manager.get_configuration(configuration_name) # restore network num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, 'nnunetv2.training.nnUNetTrainer') network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager, num_input_channels, enable_deep_supervision=False) self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) if ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) \ and not isinstance(self.network, OptimizedModule): print('compiling network') self.network = torch.compile(self.network) def manual_initialization(self, network: nn.Module, plans_manager: PlansManager, configuration_manager: ConfigurationManager, parameters: Optional[List[dict]], dataset_json: dict, 
trainer_name: str, inference_allowed_mirroring_axes: Optional[Tuple[int, ...]]): """ This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation """ self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) allow_compile = True allow_compile = allow_compile and ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) allow_compile = allow_compile and not isinstance(self.network, OptimizedModule) if isinstance(self.network, DistributedDataParallel): allow_compile = allow_compile and isinstance(self.network.module, OptimizedModule) if allow_compile: print('compiling network') self.network = torch.compile(self.network) @staticmethod def auto_detect_available_folds(model_training_output_dir, checkpoint_name): print('use_folds is None, attempting to auto detect available folds') fold_folders = subdirs(model_training_output_dir, prefix='fold_', join=False) fold_folders = [i for i in fold_folders if i != 'fold_all'] fold_folders = [i for i in fold_folders if isfile(join(model_training_output_dir, i, checkpoint_name))] use_folds = [int(i.split('_')[-1]) for i in fold_folders] print(f'found the following folds: {use_folds}') return use_folds def _manage_input_and_output_lists(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[None, str, List[str]], folder_with_segs_from_prev_stage: str = None, overwrite: bool = True, part_id: int = 0, num_parts: int = 1, save_probabilities: bool = False): if isinstance(list_of_lists_or_source_folder, str): list_of_lists_or_source_folder = create_lists_from_splitted_dataset_folder(list_of_lists_or_source_folder, self.dataset_json['file_ending']) print(f'There are {len(list_of_lists_or_source_folder)} cases in the source folder') list_of_lists_or_source_folder = list_of_lists_or_source_folder[part_id::num_parts] caseids = [os.path.basename(i[0])[:-(len(self.dataset_json['file_ending']) + 5)] for i in list_of_lists_or_source_folder] print( f'I am process {part_id} out of {num_parts} (max process ID is {num_parts - 1}, we start counting with 0!)') print(f'There are {len(caseids)} cases that I would like to predict') if isinstance(output_folder_or_list_of_truncated_output_files, str): output_filename_truncated = [join(output_folder_or_list_of_truncated_output_files, i) for i in caseids] else: output_filename_truncated = output_folder_or_list_of_truncated_output_files seg_from_prev_stage_files = [join(folder_with_segs_from_prev_stage, i + self.dataset_json['file_ending']) if folder_with_segs_from_prev_stage is not None else None for i in caseids] # remove already predicted files form the lists if not overwrite and output_filename_truncated is not None: tmp = [isfile(i + self.dataset_json['file_ending']) for i in output_filename_truncated] if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files = 
[seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that its clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images))
next_line: pp = preprocessing_iterator_fromnpy(
gold_snippet_index: 3
created_at: 2023-12-04 19:43:14+00:00
level: 16k
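To make the record layout concrete, here is a minimal sketch of how a row like the one above could be assembled into a (prompt, target) pair for next-line prediction. The field names follow the column header (context, import_statement, cropped_code, next_line); the ordering of context snippets, imports and cropped code in the prompt is an illustrative assumption rather than the dataset's official recipe, and build_example / toy_record are hypothetical names.

from typing import Any, Dict, List, Tuple

def build_example(record: Dict[str, Any]) -> Tuple[str, str]:
    # Cross-file context: each entry carries an identifier, its source path and a code snippet.
    context_parts: List[str] = [
        f"# {item['path']} :: {item['identifier']}\n{item['snippet']}"
        for item in record.get("context", [])
    ]
    # Illustrative prompt layout: retrieved snippets, then the file's imports, then the cropped file body.
    prompt = "\n\n".join(context_parts + [record["import_statement"], record["cropped_code"]])
    target = record["next_line"]  # the single gold line the model should produce next
    return prompt, target

if __name__ == "__main__":
    toy_record = {
        "context": [{"identifier": "foo", "path": "pkg/mod.py", "snippet": "def foo():\n    return 1"}],
        "import_statement": "from pkg.mod import foo",
        "cropped_code": "def caller():\n    value =",
        "next_line": " foo()",
    }
    prompt, target = build_example(toy_record)
    print(prompt)
    print("TARGET:", target)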
repo_name: Zuricho/chroma_pipeline
file_path: chroma/models/graph_backbone.py
[ { "identifier": "validate_XC", "path": "chroma/data/xcs.py", "snippet": "def validate_XCS(all_atom=None, sequence=True):\n def decorator(func):\n def new_func(*args, **kwargs):" }, { "identifier": "basic", "path": "chroma/layers/basic.py", "snippet": "class NoOp(nn.Module):\nclass Transpose(nn.Module):\nclass Unsqueeze(nn.Module):\nclass OneHot(nn.Module):\nclass MeanEmbedding(nn.Module):\nclass PeriodicPositionalEncoding(nn.Module):\nclass PositionWiseFeedForward(nn.Module):\nclass DropNormLin(nn.Module):\nclass ResidualLinearLayer(nn.Module):\nclass TriangleMultiplication(nn.Module):\nclass NodeProduct(nn.Module):\nclass FourierFeaturization(nn.Module):\nclass PositionalEncoding(nn.Module):\nclass MaybeOnehotEmbedding(nn.Embedding):\n def __init__(self):\n def forward(self, x, **kwargs):\n def __init__(self, d1=1, d2=2):\n def forward(self, x):\n def __init__(self, dim=1):\n def forward(self, x):\n def __init__(self, n_tokens):\n def forward(self, x):\n def __init__(self, embedding, use_softmax=True):\n def forward(self, x):\n def __init__(self, d_model, max_seq_len=4000, dropout=0.0):\n def forward(self, x):\n def __init__(self, d_model, d_hidden, dropout=0.1):\n def reset_parameters(self):\n def forward(self, x):\n def __init__(\n self, in_features, out_features, norm_type=\"ln\", dropout=0.0, actn=nn.ReLU()\n ):\n def forward(self, x, input_mask=None):\n def __init__(self, d_model, use_norm=True):\n def forward(self, x):\n def __init__(self, d_model=512, mode=\"outgoing\"):\n def forward(self, X, mask=None):\n def __init__(self, d_in, d_out):\n def forward(self, node_features, node_mask=None, edge_mask=None):\n def __init__(self, d_input, d_model, trainable=False, scale=1.0):\n def forward(self, inputs):\n def __init__(self, d_model, d_input=1, period_range=(1.0, 1000.0)):\n def forward(self, inputs):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n A = self.left_edge_mlp(h)\n B = self.right_edge_mlp(h)\n G = self.skip(h)\n A = A.masked_fill(~mask, 0.0)\n B = B.masked_fill(~mask, 0.0)\n B = 2 * math.pi * scale * torch.randn(d_input, d_model // 2)" }, { "identifier": "graph", "path": "chroma/layers/graph.py", "snippet": "class GraphNN(nn.Module):\nclass GraphLayer(nn.Module):\nclass MLP(nn.Module):\nclass MaskedNorm(nn.Module):\n def __init__(\n self,\n num_layers: int,\n dim_nodes: int,\n dim_edges: int,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n mlp_activation: str = \"relu\",\n dropout: float = 0.0,\n norm: str = \"transformer\",\n scale: float = 1.0,\n skip_connect_input: bool = False,\n attentional: bool = False,\n num_attention_heads: int = 4,\n checkpoint_gradients: bool = False,\n ):\n def forward(\n self,\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n def checkpoint(self, layer, *args):\n def sequential(\n self,\n tensors: dict,\n pre_step_function: Callable = None,\n post_step_function: Callable = None,\n ) -> dict:\n def init_steps(\n self, node_h: torch.Tensor, edge_h: torch.Tensor\n ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n def step(\n self,\n t: int,\n node_h_cache: List[torch.Tensor],\n edge_h_cache: List[torch.Tensor],\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[List[torch.Tensor], 
List[torch.Tensor]]:\n def __init__(\n self,\n dim_nodes: int,\n dim_edges: int,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n mlp_activation: str = \"relu\",\n dropout: float = 0.0,\n norm: str = \"transformer\",\n scale: float = 1.0,\n attentional: bool = False,\n num_attention_heads: int = 4,\n ):\n def attend(\n self, node_h: torch.Tensor, messages: torch.Tensor, mask_ij: torch.Tensor\n ) -> torch.Tensor:\n def _normalize(self, node_h, edge_h, mask_i=None, mask_ij=None):\n def _normalize_t(\n self, edge_node_stack_t, mask_ij_t, include_nodes=True, include_edges=True\n ):\n def _update_nodes(\n self, node_h, node_h_norm, edge_h_norm, edge_idx, mask_i=None, mask_ij=None\n ):\n def _update_nodes_t(\n self,\n t,\n node_h,\n node_h_norm_t,\n edge_h_norm_t,\n edge_idx_t,\n mask_i_t=None,\n mask_ij_t=None,\n ):\n def _update_edges(self, edge_h, node_h_out, edge_h_norm, edge_idx, mask_ij):\n def _update_edges_t(\n self, t, edge_h_t, node_h_out, edge_h_t_norm, edge_idx_t, mask_ij_t\n ):\n def forward(\n self,\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ):\n def step(\n self,\n t: int,\n node_h: torch.Tensor,\n node_h_out: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ):\n def __init__(\n self,\n dim_in: int,\n dim_hidden: Optional[int] = None,\n dim_out: Optional[int] = None,\n num_layers_hidden: int = 1,\n activation: str = \"relu\",\n dropout: float = 0.0,\n ):\n def forward(self, h: torch.Tensor) -> torch.Tensor:\ndef collect_neighbors(node_h: torch.Tensor, edge_idx: torch.Tensor) -> torch.Tensor:\ndef collect_edges(\n edge_h_dense: torch.Tensor, edge_idx: torch.LongTensor\n) -> torch.Tensor:\ndef collect_edges_transpose(\n edge_h: torch.Tensor, edge_idx: torch.LongTensor, mask_ij: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef scatter_edges(edge_h: torch.Tensor, edge_idx: torch.LongTensor) -> torch.Tensor:\ndef pack_edges(\n node_h: torch.Tensor, edge_h: torch.Tensor, edge_idx: torch.LongTensor\n) -> torch.Tensor:\ndef pack_edges_step(\n t: int, node_h: torch.Tensor, edge_h_t: torch.Tensor, edge_idx_t: torch.LongTensor\n) -> torch.Tensor:\ndef transpose_edge_idx(\n edge_idx: torch.LongTensor, mask_ij: torch.Tensor\n) -> Tuple[torch.LongTensor, torch.Tensor]:\ndef permute_tensor(\n tensor: torch.Tensor, dim: int, permute_idx: torch.LongTensor\n) -> torch.Tensor:\ndef permute_graph_embeddings(\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: torch.Tensor,\n mask_ij: torch.Tensor,\n permute_idx: torch.LongTensor,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor]:\ndef edge_mask_causal(edge_idx: torch.LongTensor, mask_ij: torch.Tensor) -> torch.Tensor:\n def __init__(\n self,\n dim: int,\n num_features: int = -1,\n affine: bool = False,\n norm: str = \"instance\",\n eps: float = 1e-5,\n ):\n def forward(\n self, data: torch.Tensor, mask: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n B, L, K, D = messages.size()" }, { "identifier": "backbone", "path": "chroma/layers/structure/backbone.py", "snippet": "class ProteinBackbone(nn.Module):\nclass RigidTransform(nn.Module):\nclass RigidTransformer(nn.Module):\nclass BackboneBuilder(nn.Module):\nclass 
FrameBuilder(nn.Module):\nclass GraphBackboneUpdate(nn.Module):\nclass LossBackboneResidueDistance(nn.Module):\n def __init__(\n self,\n num_residues: int,\n num_batch: int = 1,\n init_state: str = \"alpha\",\n use_internal_coords: bool = True,\n X_init: Optional[torch.Tensor] = None,\n ):\n def forward(self) -> torch.Tensor:\n def __init__(\n self,\n num_batch: int = 1,\n keep_centered: bool = False,\n scale_dX: float = 1.0,\n scale_q: float = 1.0,\n ):\n def forward(self, X: torch.Tensor) -> torch.Tensor:\n def __init__(self, center_rotation: bool = True, keep_centered: bool = False):\n def _rotation_matrix(self, q_unc: torch.Tensor) -> torch.Tensor:\n def forward(\n self,\n X: torch.Tensor,\n dX: torch.Tensor,\n q: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n def __init__(self):\n def forward(\n self,\n phi: torch.Tensor,\n psi: torch.Tensor,\n omega: Optional[torch.Tensor] = None,\n angles: Optional[torch.Tensor] = None,\n lengths: Optional[torch.Tensor] = None,\n add_O: bool = True,\n ) -> torch.Tensor:\n def _build_x_i(v_i, l_i, x, u_minus_1, u_minus_2):\n def __init__(self, distance_eps: float = 1e-3):\n def _build_O(self, X_chain: torch.Tensor, C: torch.LongTensor):\n def forward(\n self,\n R: torch.Tensor,\n t: torch.Tensor,\n C: torch.LongTensor,\n q: Optional[torch.Tensor] = None,\n ):\n def inverse(\n self, X: torch.Tensor, C: torch.LongTensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n def __init__(\n self,\n dim_nodes: int,\n dim_edges: int,\n distance_scale: float = 10.0,\n distance_eps: float = 1e-3,\n method: str = \"neighbor\",\n iterations: int = 1,\n unconstrained: bool = True,\n num_transform_weights: int = 1,\n black_hole_init: bool = False,\n ):\n def _init_black_hole(self, X):\n def _update_local_transform(self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij):\n def _update_neighbor_transform(\n self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij\n ):\n def _update_neighbor_global_transform(\n self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij\n ):\n def _update_neighbor_global_affine_transform(\n self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij\n ):\n def _inner_transforms(self, X, C, edge_idx):\n def _transform_loss(self, R_ij_predict, t_ij_predict, X, C, edge_idx, mask_ij):\n def forward(\n self,\n X: torch.Tensor,\n C: torch.LongTensor,\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: torch.Tensor,\n mask_ij: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n def __init__(self, dist_eps: float = 1e-3):\n def _D(self, X):\n def forward(\n self, X_mobile: torch.Tensor, X_target: torch.Tensor, C: torch.LongTensor\n ) -> torch.Tensor:\ndef center_X(X: torch.Tensor, C: torch.LongTensor) -> torch.Tensor:\ndef atomic_mean(\n X_flat: torch.Tensor, mask: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef scale_around_mean(\n X: torch.Tensor, C: torch.LongTensor, scale: float\n) -> torch.Tensor:\ndef impute_masked_X(X: torch.Tensor, C: torch.LongTensor) -> torch.Tensor:\ndef expand_chain_map(C: torch.LongTensor) -> torch.Tensor:\n X = self.backbone_geometry(self.phi, self.psi)\n X = self.X\n X = self.transform(X)\n R = torch.stack([\n a2 + b2 - c2 - d2, 2*b*c - 2*a*d, 2*b*d + 2*a*c,\n 2*b*c + 2*a*d, a2 - b2 + c2 - d2, 2*c*d - 2*a*b,\n 2*b*d - 2*a*c, 2*c*d + 2*a*b, a2 - b2 - c2 + d2\n ], dim=-1)\n R = R.view([num_batch, 3, 3])\n R = self._rotation_matrix(q)\n R = torch.stack([u_minus_1, n_b, n_a], 2)\n X = []\n X = torch.stack(X, 1)\n X 
= X.view([N_batch, -1, 3, 3])\n X = X[:, :-1, :, :]\n X_O = X[:, :, 2, :] + u\n X = torch.cat([X, X_O.unsqueeze(2)], 2)\n X = X - X.mean([1, 2, 3], keepdim=True)\n R = torch.eye(3).reshape([1, 1, 1, 3, 3])\n X_N, X_CA, X_C = X_chain.unbind(-2)\n X_O = geometry.extend_atoms(\n X_N_next,\n X_CA,\n X_C,\n self._length_C_O * ones,\n self._angle_CA_C_O * ones,\n self._dihedral_Np_CA_C_O * ones,\n degrees=True,\n )\n X = mask * torch.stack([X_N, X_CA, X_C, X_O], dim=-2)\n R = geometry.rotations_from_quaternions(\n q, normalize=True, eps=self.distance_eps\n )\n R = R.unsqueeze(-3)\n X = self._build_O(X_chain, C)\n R = mask.unsqueeze(-1) * R\n R = (\n torch.eye(3, device=X.device, dtype=X.dtype)\n .reshape(1, 1, 3, 3)\n .repeat(X.size(0), X.size(1), 1, 1)\n )\n R = geometry.rotations_from_quaternions(\n self.W_q(node_h), normalize=True, eps=self.distance_eps\n )\n D = (\n (X_mean[:, :, None, :] - X_mean[:, None, :, :])\n .square()\n .sum(-1)\n .add(self.dist_eps)\n .sqrt()\n )" }, { "identifier": "diffusion", "path": "chroma/layers/structure/diffusion.py", "snippet": "class GaussianNoiseSchedule:\nclass NoiseTimeEmbedding(nn.Module):\nclass DiffusionChainCov(nn.Module):\nclass ReconstructionLosses(nn.Module):\n def __init__(\n self, log_snr_range: Tuple[float, float] = (-7.0, 13.5), kind: str = \"log_snr\",\n ) -> None:\n def t_map(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def derivative(self, t: torch.Tensor, func: Callable) -> torch.Tensor:\n def tensor_check(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha_func(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma_func(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha_deriv(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma_deriv(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def beta(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def g(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def log_SNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def compute_t_range(self, log_snr: Union[float, torch.Tensor]) -> torch.Tensor:\n def SNR_derivative(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SSNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SSNR_inv(self, ssnr: torch.Tensor) -> torch.Tensor:\n def SSNR_inv_deriv(self, ssnr: Union[float, torch.Tensor]) -> torch.Tensor:\n def prob_SSNR(self, ssnr: Union[float, torch.Tensor]) -> torch.Tensor:\n def linear_logsnr_grid(self, N: int, tspan: Tuple[float, float]) -> torch.Tensor:\n def __init__(\n self,\n dim_embedding: int,\n noise_schedule: GaussianNoiseSchedule,\n rff_scale: float = 0.8,\n feature_type: str = \"log_snr\",\n ) -> None:\n def forward(\n self, t: torch.Tensor, log_alpha: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n def __init__(\n self,\n log_snr_range: Tuple[float, float] = (-7.0, 13.5),\n noise_schedule: str = \"log_snr\",\n sigma_translation: float = 1.0,\n covariance_model: str = \"brownian\",\n complex_scaling: bool = False,\n **kwargs,\n ) -> None:\n def sample_t(\n self,\n C: torch.LongTensor,\n t: Optional[torch.Tensor] = None,\n inverse_CDF: Optional[Callable] = None,\n ) -> torch.Tensor:\n def sde_forward(self, X, C, t, Z=None):\n def _schedule_coefficients(\n self,\n t: torch.Tensor,\n inverse_temperature: float = 1.0,\n langevin_isothermal: 
bool = True,\n ) -> Tuple[\n def langevin(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n ):\n def reverse_sde(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n ):\n def ode(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n detach_X0: bool = True,\n ):\n def energy(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.Tensor,\n t: torch.Tensor,\n detach_X0: bool = True,\n align_X0: bool = True,\n ) -> torch.Tensor:\n def score(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.Tensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n detach_X0: bool = True,\n align_X0: bool = True,\n U_traj: List = [],\n ) -> torch.Tensor:\n def elbo(self, X0_pred, X0, C, t):\n def pseudoelbo(self, loss_per_residue, C, t):\n def _baoab_sample_step(\n self,\n _x,\n p,\n C,\n t,\n dt,\n score_func,\n gamma=2.0,\n kT=1.0,\n n_equil=1,\n ode_boost=True,\n langevin_isothermal=False,\n ):\n def baoab_step(_x, p, t):\n def ode_step(t, _x):\n def sample_sde(\n self,\n X0_func: Callable,\n C: torch.LongTensor,\n X_init: Optional[torch.Tensor] = None,\n conditioner: Optional[Callable] = None,\n N: int = 100,\n tspan: Tuple[float, float] = (1.0, 0.001),\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n sde_func: str = \"reverse_sde\",\n integrate_func: str = \"euler_maruyama\",\n initialize_noise: bool = True,\n remap_time: bool = False,\n remove_drift_translate: bool = False,\n remove_noise_translate: bool = False,\n align_X0: bool = True,\n ) -> Dict[str, torch.Tensor]:\n def _X0_func(_X, _C, t):\n def sdefun(_t, _X):\n def estimate_pseudoelbo_X(\n self,\n X0_func,\n X,\n C,\n num_samples=50,\n deterministic_seed=0,\n return_elbo_t=False,\n noise=True,\n ):\n def _score_direct(\n self, Xt, X0_func, C, t, align_X0=True,\n ):\n def estimate_logp(\n self,\n X0_func: Callable,\n X_sample: torch.Tensor,\n C: torch.LongTensor,\n N: int,\n return_trace_t: bool = False,\n ):\n def divergence(fn, x, t):\n def flow_gradient(\n X, X0_func, C, t,\n ):\n def odefun(_t, _X):\n def estimate_elbo(\n self,\n X0_func: Callable,\n X: torch.Tensor,\n C: torch.LongTensor,\n num_samples: int = 50,\n deterministic_seed: int = 0,\n return_elbo_t: bool = False,\n grad_logprob_Y_func: Optional[Callable] = None,\n ) -> torch.Tensor:\n def conditional_X0(\n self, X0: torch.Tensor, score: torch.Tensor, C: torch.tensor, t: torch.Tensor\n ) -> torch.Tensor:\n def _mean(self, X, C, alpha):\n def _X_to_Z(self, X_sample, X, C, alpha, sigma):\n def _Z_to_X(self, Z, X, C, alpha, sigma):\n def sample_conditional(\n self, X: torch.Tensor, C: torch.LongTensor, t: torch.Tensor, s: torch.Tensor\n ) -> torch.Tensor:\n def forward(\n self, X: torch.Tensor, C: torch.LongTensor, t: Optional[torch.Tensor] = None\n ) 
-> Tuple[torch.Tensor, torch.Tensor]:\n def __init__(\n self,\n diffusion: DiffusionChainCov,\n loss_scale: float = 10.0,\n rmsd_method: str = \"symeig\",\n ):\n def _batch_average(self, loss, C):\n def _loss_elbo(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_rmsd(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_pseudoelbo(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_fragment(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_pair(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_neighborhood(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_distance(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_hbonds(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def estimate_metrics(\n self,\n X0_func: Callable,\n X: torch.Tensor,\n C: torch.LongTensor,\n num_samples: int = 50,\n deterministic_seed: int = 0,\n use_noise: bool = True,\n return_samples: bool = False,\n tspan: Tuple[float] = (1e-4, 1.0),\n ):\n def forward(\n self,\n X0_pred: torch.Tensor,\n X: torch.Tensor,\n C: torch.LongTensor,\n t: torch.Tensor,\n ):\ndef _debug_viz_gradients(\n pml_file, X_list, dX_list, C, S, arrow_length=2.0, name=\"gradient\", color=\"red\"\n):\ndef _debug_viz_XZC(X, Z, C, rgb=True):\n SNR = self.log_SNR(t).exp()\n SNR = self.alpha(t).pow(2) / (self.sigma(t).pow(2))\n Z = torch.randn_like(X)\n Z = Z.reshape(X.shape[0], -1, 3)\n R_Z = self.base_gaussian._multiply_R(Z, C).reshape(X.shape)\n X = backbone.center_X(X, C)\n Z = torch.randn_like(X) if Z is None else Z\n Z = torch.randn_like(X) if Z is None else Z\n X = backbone.center_X(X, C)\n X = backbone.impute_masked_X(X, C)\n X0 = X0_func(X, C, t=t)\n X0 = X0_func(X, C, t=t)\n X0, _ = self.loss_rmsd.align(X0, X, C, align_unmasked=True)\n X0 = X0.detach()\n Z = self._X_to_Z(X, X0, C, alpha, sigma)\n X = backbone.impute_masked_X(X, C)\n X = X.detach().clone()\n X0 = backbone.impute_masked_X(X0, C)\n Z = torch.randn_like(_x)\n _X0 = X0_func(_X, _C, t)\n T = np.linspace(1e-4, 1.0, num_samples)\n X0 = X0_func(Xt, C, t)\n X0, _ = self.loss_rmsd.align(X0, Xt, C, align_unmasked=True)\n C = C.abs()\n X = backbone.impute_masked_X(X, C)\n T = np.linspace(1e-4, 1.0, num_samples)\n X = backbone.impute_masked_X(X, C)\n Z = self.base_gaussian._multiply_R_inverse(X_noise, C)\n X = backbone.impute_masked_X(X, C)\n X = backbone.center_X(X, C)\n X = backbone.impute_masked_X(X, C)\n T = np.linspace(1e-4, 1.0, num_samples)\n X = X.reshape(X.shape[0], -1, 3)\n Z = Z.reshape(Z.shape[0], -1, 3)\n C = C_expand.reshape(C.shape[0], -1)\n N = X.shape[1]" }, { "identifier": "transforms", "path": "chroma/layers/structure/transforms.py", "snippet": "def compose_transforms(\n R_a: torch.Tensor, t_a: torch.Tensor, R_b: torch.Tensor, t_b: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef compose_translation(\n R_a: torch.Tensor, t_a: torch.Tensor, t_b: torch.Tensor\n) -> torch.Tensor:\ndef compose_inner_transforms(\n R_a: torch.Tensor, t_a: torch.Tensor, R_b: torch.Tensor, t_b: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef fuse_gaussians_isometric_plus_radial(\n x: torch.Tensor,\n p_iso: torch.Tensor,\n p_rad: torch.Tensor,\n direction: torch.Tensor,\n dim: int,\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef collect_neighbor_transforms(\n R_i: torch.Tensor, t_i: torch.Tensor, edge_idx: torch.LongTensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef collect_neighbor_inner_transforms(\n R_i: torch.Tensor, t_i: torch.Tensor, edge_idx: torch.LongTensor\n) -> 
Tuple[torch.Tensor, torch.Tensor]:\ndef equilibrate_transforms(\n R_i: torch.Tensor,\n t_i: torch.Tensor,\n R_ji: torch.Tensor,\n t_ji: torch.Tensor,\n logit_ij: torch.Tensor,\n mask_ij: torch.Tensor,\n edge_idx: torch.LongTensor,\n iterations: int = 1,\n R_global: Optional[torch.Tensor] = None,\n t_global: Optional[torch.Tensor] = None,\n R_global_i: Optional[torch.Tensor] = None,\n t_global_i: Optional[torch.Tensor] = None,\n logit_global_i: Optional[torch.Tensor] = None,\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef average_transforms(\n R: torch.Tensor,\n t: torch.Tensor,\n w: torch.Tensor,\n mask: torch.Tensor,\n dim: int,\n t_edge: Optional[torch.Tensor] = None,\n dither: Optional[bool] = True,\n dither_eps: float = 1e-4,\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef _debug_plot_transforms(\n R_ij: torch.Tensor,\n t_ij: torch.Tensor,\n logits_ij: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_ij: torch.Tensor,\n dist_eps: float = 1e-3,\n):\n def _format(T):\n P = P_iso + P_rad\n D = torch.sqrt(t_ij.square().sum(-1))\n U = t_ij / (D[..., None] + dist_eps)\n T = T.cpu().data.numpy()\n T = (T + 1) / 2" }, { "identifier": "BackboneEncoderGNN", "path": "chroma/models/graph_design.py", "snippet": "class BackboneEncoderGNN(nn.Module):\n \"\"\"Graph Neural Network for processing protein structure into graph embeddings.\n\n Args:\n See documention of `structure.protein_graph.ProteinFeatureGraph`,\n and `graph.GraphNN` for more details.\n\n dim_nodes (int): Hidden dimension of node tensors.\n dim_edges (int): Hidden dimension of edge tensors.\n num_neighbors (int): Number of neighbors per nodes.\n node_features (tuple): List of node feature specifications. Features\n can be given as strings or as dictionaries.\n edge_features (tuple): List of edge feature specifications. Features\n can be given as strings or as dictionaries.\n num_layers (int): Number of layers.\n node_mlp_layers (int): Number of hidden layers for node update\n function.\n node_mlp_dim (int, optional): Dimension of hidden layers for node update\n function, defaults to match output dimension.\n edge_update (bool): Whether to include an edge update step.\n edge_mlp_layers (int): Number of hidden layers for edge update\n function.\n edge_mlp_dim (int, optional): Dimension of hidden layers for edge update\n function, defaults to match output dimension.\n skip_connect_input (bool): Whether to include skip connections between\n layers.\n mlp_activation (str): MLP nonlinearity function, `relu` or `softplus`\n accepted.\n dropout (float): Dropout fraction.\n graph_distance_atom_type (int): Atom type for computing residue-residue\n distances for graph construction. Negative values will specify\n centroid across atom types. Default is `-1` (centroid).\n graph_cutoff (float, optional): Cutoff distance for graph construction:\n mask any edges further than this cutoff. Default is `None`.\n graph_mask_interfaces (bool): Restrict connections only to within\n chains, excluding-between chain interactions. 
Default is `False`.\n graph_criterion (str): Method used for building graph from distances.\n Currently supported methods are `{knn, random_log, random_linear}`.\n Default is `knn`.\n graph_random_min_local (int): Minimum number of neighbors in GNN that\n come from local neighborhood, before random neighbors are chosen.\n checkpoint_gradients (bool): Switch to implement gradient checkpointing\n during training.\n\n Inputs:\n X (torch.Tensor): Backbone coordinates with shape\n `(num_batch, num_residues, num_atoms, 3)`.\n C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`.\n node_h_aux (torch.LongTensor, optional): Auxiliary node features with\n shape `(num_batch, num_residues, dim_nodes)`.\n edge_h_aux (torch.LongTensor, optional): Auxiliary edge features with\n shape `(num_batch, num_residues, num_neighbors, dim_edges)`.\n edge_idx (torch.LongTensor, optional): Input edge indices for neighbors\n with shape `(num_batch, num_residues, num_neighbors)`.\n mask_ij (torch.Tensor, optional): Input edge mask with shape\n `(num_batch, num_nodes, num_neighbors)`.\n\n Outputs:\n node_h (torch.Tensor): Node features with shape\n `(num_batch, num_residues, dim_nodes)`.\n edge_h (torch.Tensor): Edge features with shape\n `(num_batch, num_residues, num_neighbors, dim_edges)`.\n edge_idx (torch.LongTensor): Edge indices for neighbors with shape\n `(num_batch, num_residues, num_neighbors)`.\n mask_i (torch.Tensor): Node mask with shape `(num_batch, num_residues)`.\n mask_ij (torch.Tensor): Edge mask with shape\n `(num_batch, num_nodes, num_neighbors)`.\n \"\"\"\n\n def __init__(\n self,\n dim_nodes: int = 128,\n dim_edges: int = 128,\n num_neighbors: int = 30,\n node_features: tuple = ((\"internal_coords\", {\"log_lengths\": True}),),\n edge_features: tuple = (\n \"distances_2mer\",\n \"orientations_2mer\",\n \"distances_chain\",\n ),\n num_layers: int = 3,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n skip_connect_input: bool = False,\n mlp_activation: str = \"softplus\",\n dropout: float = 0.1,\n graph_distance_atom_type: int = -1,\n graph_cutoff: Optional[float] = None,\n graph_mask_interfaces: bool = False,\n graph_criterion: str = \"knn\",\n graph_random_min_local: int = 20,\n checkpoint_gradients: bool = False,\n **kwargs\n ) -> None:\n \"\"\"Initialize BackboneEncoderGNN.\"\"\"\n super(BackboneEncoderGNN, self).__init__()\n\n # Save configuration in kwargs\n self.kwargs = locals()\n self.kwargs.pop(\"self\")\n for key in list(self.kwargs.keys()):\n if key.startswith(\"__\") and key.endswith(\"__\"):\n self.kwargs.pop(key)\n args = SimpleNamespace(**self.kwargs)\n\n # Important global options\n self.dim_nodes = dim_nodes\n self.dim_edges = dim_edges\n self.checkpoint_gradients = checkpoint_gradients\n\n graph_kwargs = {\n \"distance_atom_type\": args.graph_distance_atom_type,\n \"cutoff\": args.graph_cutoff,\n \"mask_interfaces\": args.graph_mask_interfaces,\n \"criterion\": args.graph_criterion,\n \"random_min_local\": args.graph_random_min_local,\n }\n\n self.feature_graph = protein_graph.ProteinFeatureGraph(\n dim_nodes=args.dim_nodes,\n dim_edges=args.dim_edges,\n num_neighbors=args.num_neighbors,\n graph_kwargs=graph_kwargs,\n node_features=args.node_features,\n edge_features=args.edge_features,\n )\n\n self.gnn = graph.GraphNN(\n dim_nodes=args.dim_nodes,\n dim_edges=args.dim_edges,\n num_layers=args.num_layers,\n node_mlp_layers=args.node_mlp_layers,\n 
node_mlp_dim=args.node_mlp_dim,\n edge_update=args.edge_update,\n edge_mlp_layers=args.edge_mlp_layers,\n edge_mlp_dim=args.edge_mlp_dim,\n mlp_activation=args.mlp_activation,\n dropout=args.dropout,\n norm=\"transformer\",\n scale=args.num_neighbors,\n skip_connect_input=args.skip_connect_input,\n checkpoint_gradients=checkpoint_gradients,\n )\n\n @validate_XC(all_atom=False)\n def forward(\n self,\n X: torch.Tensor,\n C: torch.LongTensor,\n node_h_aux: Optional[torch.Tensor] = None,\n edge_h_aux: Optional[torch.Tensor] = None,\n edge_idx: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[\n torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor\n ]:\n \"\"\"Encode XC backbone structure into node and edge features.\"\"\"\n num_batch, num_residues = C.shape\n\n # Hack to enable checkpointing\n if self.checkpoint_gradients and (not X.requires_grad):\n X.requires_grad = True\n\n node_h, edge_h, edge_idx, mask_i, mask_ij = self._checkpoint(\n self.feature_graph, X, C, edge_idx, mask_ij\n )\n\n if node_h_aux is not None:\n node_h = node_h + mask_i.unsqueeze(-1) * node_h_aux\n if edge_h_aux is not None:\n edge_h = edge_h + mask_ij.unsqueeze(-1) * edge_h_aux\n\n node_h, edge_h = self.gnn(node_h, edge_h, edge_idx, mask_i, mask_ij)\n return node_h, edge_h, edge_idx, mask_i, mask_ij\n\n def _checkpoint(self, module: nn.Module, *args) -> nn.Module:\n if self.checkpoint_gradients:\n return checkpoint(module, *args)\n else:\n return module(*args)" }, { "identifier": "load_model", "path": "chroma/utility/model.py", "snippet": "def load_model(\n weights,\n model_class,\n device=\"cpu\",\n strict=False,\n strict_unexpected=True,\n verbose=True,\n):\n \"\"\"Load model saved with save_model.\n\n Args:\n weights (str): The destination path of the model weights to load.\n Compatible with files saved by `save_model`.\n model_class: Name of model class.\n device (str, optional): Pytorch device specification, e.g. `'cuda'` for\n GPU. Default is `'cpu'`.\n strict (bool): Whether to require that the keys match between the\n input file weights and the model created from the parameters stored\n in the model kwargs.\n strict_unexpected (bool): Whether to require that there are no\n unexpected keys when loading model weights, as distinct from the\n strict option which doesn't allow for missing keys either. By\n default, we use this option rather than strict for ease of\n development when adding model features.\n verbose (bool, optional): Show outputs from download and loading. 
Default True.\n\n Returns:\n model (nn.Module): Torch model with loaded weights.\n \"\"\"\n\n # Process weights path\n if str(weights).startswith(\"named:\"):\n weights = weights.split(\"named:\")[1]\n if weights not in NAMED_MODELS[model_class.__name__]:\n raise Exception(f\"Unknown {model_class.__name__} model name: {weights},\")\n weights = NAMED_MODELS[model_class.__name__][weights][\"s3_uri\"]\n\n # resolve s3 paths\n if str(weights).startswith(\"s3:\"):\n raise NotImplementedError(\"Loading Models from an S3 link not supported.\")\n\n # download public models from generate\n if str(weights).startswith(\"https:\"):\n # Decompose into arguments\n parsed_url = urlparse(weights)\n base_url = f\"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}\"\n model_name = parse_qs(parsed_url.query).get(\"weights\", [None])[0]\n weights = api.download_from_generate(\n base_url, model_name, force=False, exist_ok=True\n )\n\n # load model weights\n params = torch.load(weights, map_location=\"cpu\")\n model = model_class(**params[\"init_kwargs\"]).to(device)\n missing_keys, unexpected_keys = model.load_state_dict(\n params[\"model_state_dict\"], strict=strict\n )\n if strict_unexpected and len(unexpected_keys) > 0:\n raise Exception(\n f\"Error loading model from checkpoint file: {weights} contains {len(unexpected_keys)} unexpected keys: {unexpected_keys}\"\n )\n return model" } ]
from types import SimpleNamespace from typing import Optional, Tuple, Union from chroma.data.xcs import validate_XC from chroma.layers import basic, graph from chroma.layers.structure import backbone, diffusion, transforms from chroma.models.graph_design import BackboneEncoderGNN from chroma.utility.model import load_model as utility_load_model from matplotlib import pyplot as plt import torch import torch.nn as nn
11,541
`layers.structure.diffusion.DiffusionChainCov` for more details on hyperparameters. Inputs: X (Tensor): Backbone coordinates with shape `(num_batch, num_residues, num_atoms, 3)`. C (LongTensor): Chain map with shape `(num_batch, num_residues)`. Outputs: neglogp (Tensor): Sum of `neglogp_S` and `neglogp_chi`. """ def __init__( self, dim_nodes: int = 128, dim_edges: int = 128, num_neighbors: int = 30, node_features: Tuple = (("internal_coords", {"log_lengths": True}),), edge_features: Tuple = ( "distances_2mer", "orientations_2mer", "distances_chain", ), num_layers: int = 3, dropout: float = 0.1, node_mlp_layers: int = 1, node_mlp_dim: Optional[int] = None, edge_update: bool = True, edge_mlp_layers: int = 1, edge_mlp_dim: Optional[int] = None, skip_connect_input: bool = False, mlp_activation: str = "softplus", decoder_num_hidden: int = 512, graph_criterion: str = "knn", graph_random_min_local: int = 20, backbone_update_method: str = "neighbor", backbone_update_iterations: int = 1, backbone_update_num_weights: int = 1, backbone_update_unconstrained: bool = True, use_time_features: bool = True, time_feature_type: str = "t", time_log_feature_scaling: float = 0.05, noise_schedule: str = "log_snr", noise_covariance_model: str = "brownian", noise_beta_min: float = 0.2, noise_beta_max: float = 70.0, noise_log_snr_range: Tuple[float] = (-7.0, 13.5), noise_complex_scaling: bool = False, loss_scale: float = 10.0, loss_scale_ssnr_cutoff: float = 0.99, loss_function: str = "squared_fape", checkpoint_gradients: bool = False, prediction_type: str = "X0", num_graph_cycles: int = 1, **kwargs, ): """Initialize GraphBackbone network.""" super(GraphBackbone, self).__init__() # Save configuration in kwargs self.kwargs = locals() self.kwargs.pop("self") for key in list(self.kwargs.keys()): if key.startswith("__") and key.endswith("__"): self.kwargs.pop(key) args = SimpleNamespace(**self.kwargs) # Important global options self.dim_nodes = args.dim_nodes self.dim_edges = args.dim_edges # Encoder GNN process backbone self.num_graph_cycles = args.num_graph_cycles self.encoders = nn.ModuleList( [ BackboneEncoderGNN( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, num_neighbors=args.num_neighbors, node_features=args.node_features, edge_features=args.edge_features, num_layers=args.num_layers, node_mlp_layers=args.node_mlp_layers, node_mlp_dim=args.node_mlp_dim, edge_update=args.edge_update, edge_mlp_layers=args.edge_mlp_layers, edge_mlp_dim=args.edge_mlp_dim, mlp_activation=args.mlp_activation, dropout=args.dropout, skip_connect_input=args.skip_connect_input, graph_criterion=args.graph_criterion, graph_random_min_local=args.graph_random_min_local, checkpoint_gradients=checkpoint_gradients, ) for i in range(self.num_graph_cycles) ] ) self.backbone_updates = nn.ModuleList( [ backbone.GraphBackboneUpdate( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, method=args.backbone_update_method, iterations=args.backbone_update_iterations, num_transform_weights=args.backbone_update_num_weights, unconstrained=args.backbone_update_unconstrained, ) for i in range(self.num_graph_cycles) ] ) self.use_time_features = args.use_time_features self.time_feature_type = args.time_feature_type self.time_log_feature_scaling = time_log_feature_scaling if self.use_time_features: self.time_features = basic.FourierFeaturization( d_input=1, d_model=dim_nodes, trainable=False, scale=16.0 )
# Copyright Generate Biomedicines, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Models for generating protein backbone structure via diffusion. """ class GraphBackbone(nn.Module): """Graph-based backbone generation for protein complexes. GraphBackbone parameterizes a generative model of the backbone coordinates of protein complexes. Args: See documention of `layers.structure.protein_graph.ProteinFeatureGraph`, `graph.GraphNN`, `layers.structure.backbone.GraphBackboneUpdate` and `layers.structure.diffusion.DiffusionChainCov` for more details on hyperparameters. Inputs: X (Tensor): Backbone coordinates with shape `(num_batch, num_residues, num_atoms, 3)`. C (LongTensor): Chain map with shape `(num_batch, num_residues)`. Outputs: neglogp (Tensor): Sum of `neglogp_S` and `neglogp_chi`. """ def __init__( self, dim_nodes: int = 128, dim_edges: int = 128, num_neighbors: int = 30, node_features: Tuple = (("internal_coords", {"log_lengths": True}),), edge_features: Tuple = ( "distances_2mer", "orientations_2mer", "distances_chain", ), num_layers: int = 3, dropout: float = 0.1, node_mlp_layers: int = 1, node_mlp_dim: Optional[int] = None, edge_update: bool = True, edge_mlp_layers: int = 1, edge_mlp_dim: Optional[int] = None, skip_connect_input: bool = False, mlp_activation: str = "softplus", decoder_num_hidden: int = 512, graph_criterion: str = "knn", graph_random_min_local: int = 20, backbone_update_method: str = "neighbor", backbone_update_iterations: int = 1, backbone_update_num_weights: int = 1, backbone_update_unconstrained: bool = True, use_time_features: bool = True, time_feature_type: str = "t", time_log_feature_scaling: float = 0.05, noise_schedule: str = "log_snr", noise_covariance_model: str = "brownian", noise_beta_min: float = 0.2, noise_beta_max: float = 70.0, noise_log_snr_range: Tuple[float] = (-7.0, 13.5), noise_complex_scaling: bool = False, loss_scale: float = 10.0, loss_scale_ssnr_cutoff: float = 0.99, loss_function: str = "squared_fape", checkpoint_gradients: bool = False, prediction_type: str = "X0", num_graph_cycles: int = 1, **kwargs, ): """Initialize GraphBackbone network.""" super(GraphBackbone, self).__init__() # Save configuration in kwargs self.kwargs = locals() self.kwargs.pop("self") for key in list(self.kwargs.keys()): if key.startswith("__") and key.endswith("__"): self.kwargs.pop(key) args = SimpleNamespace(**self.kwargs) # Important global options self.dim_nodes = args.dim_nodes self.dim_edges = args.dim_edges # Encoder GNN process backbone self.num_graph_cycles = args.num_graph_cycles self.encoders = nn.ModuleList( [ BackboneEncoderGNN( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, num_neighbors=args.num_neighbors, node_features=args.node_features, edge_features=args.edge_features, num_layers=args.num_layers, node_mlp_layers=args.node_mlp_layers, node_mlp_dim=args.node_mlp_dim, edge_update=args.edge_update, edge_mlp_layers=args.edge_mlp_layers, edge_mlp_dim=args.edge_mlp_dim, mlp_activation=args.mlp_activation, dropout=args.dropout, 
skip_connect_input=args.skip_connect_input, graph_criterion=args.graph_criterion, graph_random_min_local=args.graph_random_min_local, checkpoint_gradients=checkpoint_gradients, ) for i in range(self.num_graph_cycles) ] ) self.backbone_updates = nn.ModuleList( [ backbone.GraphBackboneUpdate( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, method=args.backbone_update_method, iterations=args.backbone_update_iterations, num_transform_weights=args.backbone_update_num_weights, unconstrained=args.backbone_update_unconstrained, ) for i in range(self.num_graph_cycles) ] ) self.use_time_features = args.use_time_features self.time_feature_type = args.time_feature_type self.time_log_feature_scaling = time_log_feature_scaling if self.use_time_features: self.time_features = basic.FourierFeaturization( d_input=1, d_model=dim_nodes, trainable=False, scale=16.0 )
self.noise_perturb = diffusion.DiffusionChainCov(
4
2023-11-28 00:09:40+00:00
16k
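The record above ends here (metadata values follow the context list, the import statement, the cropped source, and the reference next line). A minimal sketch, under assumed field names inferred from the order of values in this dump, of how such a record could be reassembled into a next-line completion prompt and scored; "build_prompt" and "exact_match" are hypothetical helpers, not part of the dataset.

from typing import Dict


def build_prompt(record: Dict) -> str:
    # Assumed keys: "context" is a list of {"identifier", "path", "snippet"}
    # dicts, "import_statement" and "cropped_code" are plain source text.
    context_blocks = []
    for entry in record["context"]:
        header = "# {} :: {}".format(entry["path"], entry["identifier"])
        snippet = entry["snippet"]
        # If the snippet still carries literal backslash-n escapes (as printed
        # in this dump), decode them; a JSON loader may already have done so.
        if "\\n" in snippet:
            snippet = snippet.replace("\\n", "\n")
        context_blocks.append(header + "\n" + snippet)
    parts = context_blocks + [record["import_statement"], record["cropped_code"]]
    return "\n\n".join(parts)


def exact_match(prediction: str, record: Dict) -> bool:
    # The reference continuation is the single source line stored as "next_line".
    return prediction.strip() == record["next_line"].strip()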
BiQiWHU/CMFormer
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = 
T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert 
self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "mask2former/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "mask2former/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
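The SemanticSegmentorWithTTA snippet above documents its constructor and __call__, but the portion of the training script captured in this record only imports it. A minimal usage sketch (not part of the record), assuming detectron2 and mask2former are installed and that the config and checkpoint paths below are hypothetical stand-ins for a trained semantic-segmentation model:

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer
from detectron2.projects.deeplab import add_deeplab_config
from mask2former import SemanticSegmentorWithTTA, add_maskformer2_config

cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file("maskformer2_semantic_config.yaml")  # hypothetical config path
cfg.freeze()

model = DefaultTrainer.build_model(cfg)
DetectionCheckpointer(model).load("model_final.pth")  # hypothetical checkpoint
model.eval()

# Wrap the model; DatasetMapperTTA(cfg) is created internally when tta_mapper is None.
tta_model = SemanticSegmentorWithTTA(cfg, model)
# The wrapper reads the image itself when only "file_name" is supplied.
result = tta_model([{"file_name": "demo.jpg"}])[0]
sem_seg = result["sem_seg"]  # per-pixel scores averaged over the augmented views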
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11412
warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
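The build_evaluator classmethod in the cropped_code above is typically invoked indirectly by DefaultTrainer.test; a sketch (not part of the record) of calling it directly for a one-off evaluation, reusing the cfg and model from the previous sketch and assuming the dataset name below is registered with evaluator_type "sem_seg":

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

dataset_name = "ade20k_sem_seg_val"  # example name; any registered dataset works
evaluator = Trainer.build_evaluator(cfg, dataset_name)
val_loader = build_detection_test_loader(cfg, dataset_name)
results = inference_on_dataset(model, val_loader, evaluator)
print(results)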
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True)
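This next_line fills in the final branch of build_train_loader shown in cropped_code above; inferred from the pattern of the earlier branches (and not taken from the record), the surrounding dispatch plausibly looks like the sketch below, where build_train_loader_sketch is a hypothetical stand-in for Trainer.build_train_loader and the trailing else fallback is an assumption:

from detectron2.data import build_detection_train_loader
from mask2former import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
)

def build_train_loader_sketch(cfg):
    if cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj":
        mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True)
        return build_detection_train_loader(cfg, mapper=mapper)
    elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
        mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True)  # the gold next_line
        return build_detection_train_loader(cfg, mapper=mapper)
    else:
        return build_detection_train_loader(cfg, mapper=None)  # assumed fallback to the default mapper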
2
2023-11-29 15:26:53+00:00
16k
PopicLab/insilicoSV
test/test_processing.py
[ { "identifier": "SV_Simulator", "path": "insilicosv/simulate.py", "snippet": "class SV_Simulator:\n def __init__(self, par_file, log_file=None):\n \"\"\"\n par_file: file location to configuration file (.yaml)\n log_file: location to store log file with diagnostic information if config parameters indicate so\n \"\"\"\n global time_start\n print(\"Setting up Simulator...\")\n\n self.formatter = FormatterIO(par_file)\n self.formatter.yaml_to_var_list()\n config = self.formatter.config\n self.ref_file = config['sim_settings']['reference']\n self.ref_fasta = FastaFile(self.ref_file)\n self.svs_config = config['variant_sets']\n\n self.sim_settings = config['sim_settings']\n if log_file and \"generate_log_file\" in self.sim_settings.keys():\n logging.basicConfig(filename=log_file, filemode=\"w\", level=logging.DEBUG,\n format='[%(name)s: %(levelname)s - %(asctime)s] %(message)s')\n self.log_to_file(\"YAML Configuration: {}\".format(config))\n\n # get all chromosome ids\n self.order_ids = self.ref_fasta.references\n self.len_dict = dict() # stores mapping with key = chromosome, value = chromosome length\n for id in self.order_ids:\n chrom_len = self.ref_fasta.get_reference_length(id)\n if 'filter_small_chr' in self.sim_settings and chrom_len < self.sim_settings['filter_small_chr']:\n print(\"Filtering chromosome {}: Length of {} below threshold of {}\".format(id, chrom_len, self.sim_settings['filter_small_chr']))\n else:\n self.len_dict[id] = chrom_len\n print(\"Length of chromosome {}: {}\".format(id, self.len_dict[id]))\n\n # initialize stats file to be generated after all edits and exporting are finished\n self.stats = StatsCollection(self.order_ids, self.len_dict)\n\n self.mode = \"randomized\"\n self.vcf_path = None\n if \"vcf_path\" in self.svs_config[0]:\n self.mode = \"fixed\"\n self.vcf_path = self.svs_config[0][\"vcf_path\"]\n\n self.svs = []\n self.event_ranges = defaultdict(list)\n\n if \"avoid_intervals\" in config:\n # extract {chrom: [(start, end)]} intervals from vcf, add intervals from vcf to event range\n self.extract_vcf_event_intervals(config[\"avoid_intervals\"])\n\n self.overlap_events = None if \"overlap_events\" not in config.keys() \\\n else utils.OverlapEvents(config, allow_chroms=self.order_ids)\n\n self.initialize_svs()\n\n print(\"Finished Setting up Simulator in {} seconds\\n\".format(time.time() - time_start))\n time_start = time.time()\n\n def __repr__(self):\n return \"All structural variants entered into simulator: {}\".format(self.svs)\n\n def log_to_file(self, info, key=\"DEBUG\"):\n # only logs to file if config setting indicates so\n key_to_func = {\"DEBUG\": logging.debug, \"WARNING\": logging.warning}\n if \"generate_log_file\" in self.sim_settings and self.sim_settings[\"generate_log_file\"]:\n key_to_func[key](info)\n\n def get_rand_chr(self, check_size=None, fixed_chrom=None):\n # random assignment of SV to a chromosome (unless we have a predetermined chromosome for this event)\n valid_chrs = self.order_ids\n if check_size is not None:\n valid_chrs = [chrom for chrom, chr_size in self.len_dict.items() if chr_size >= check_size]\n if len(valid_chrs) == 0:\n raise Exception(\"SVs are too big for the reference!\")\n rand_id = valid_chrs[random.randint(0, len(valid_chrs) - 1)] if fixed_chrom is None else fixed_chrom\n chr_len = self.len_dict[rand_id]\n chr_event_ranges = self.event_ranges[rand_id]\n assert rand_id is not None\n return rand_id, chr_len, chr_event_ranges\n\n def extract_vcf_event_intervals(self, vcf_path):\n vcf = VariantFile(vcf_path)\n for 
rec in vcf.fetch():\n self.event_ranges[rec.chrom].append((rec.start, rec.stop))\n\n def process_vcf(self, vcf_path):\n # process vcf containing SVs to be added (deterministically) to reference\n active_svs_total = 0\n time_start_local = 0\n vcf = VariantFile(vcf_path)\n for rec in vcf.fetch():\n svtype = Variant_Type(rec.info['SVTYPE']) if 'SVTYPE' in rec.info else Variant_Type(rec.id)\n self.event_ranges[rec.chrom].append((rec.start, rec.stop))\n sv = Structural_Variant(sv_type=svtype, mode='fixed', vcf_rec=rec, ref_fasta=self.ref_fasta)\n self.svs.append(sv)\n active_svs_total += 1\n self.log_to_file(\"Intervals {} added to Chromosome \\\"{}\\\"\".format(self.event_ranges[rec.chrom], rec.chrom))\n time_dif = time.time() - time_start_local\n print(\"{} SVs successfully placed ========== {} seconds\".format(active_svs_total, time_dif), end=\"\\r\")\n time_start_local = time.time()\n\n def initialize_svs(self):\n \"\"\"\n Creates Structural_Variant objects for every SV to simulate and decides zygosity\n self.mode: flag indicating whether SVs are to be randomly generated or read in from VCF\n self.vcf_path: optional path that will be used if mode==\"fixed\"\n \"\"\"\n if self.mode == \"randomized\":\n for sv_config in self.svs_config:\n for num in range(sv_config[\"number\"]):\n # logic for placing events at intervals given in overlap bed file:\n # for the first (sv_config[\"num_overlap\"]) events, instantiate the SV at the next valid repeat elt interval\n repeat_elt = None\n elt_type = None\n if self.overlap_events is not None:\n sv_config_identifier = utils.get_sv_config_identifier(sv_config)\n if sv_config_identifier in self.overlap_events.svtype_overlap_counts.keys():\n repeat_elt, retrieved_type, elt_type = self.overlap_events.get_single_element_interval(\n sv_config_identifier, sv_config, partial_overlap=False)\n elif sv_config_identifier in self.overlap_events.svtype_partial_overlap_counts.keys():\n repeat_elt, retrieved_type, elt_type = self.overlap_events.get_single_element_interval(\n sv_config_identifier, sv_config, partial_overlap=True)\n elif sv_config_identifier in self.overlap_events.svtype_alu_mediated_counts.keys():\n repeat_elt, retrieved_type = self.overlap_events.get_alu_mediated_interval(sv_config_identifier)\n if sv_config['type'] == Variant_Type.SNP:\n sv = Structural_Variant(sv_type=sv_config[\"type\"], mode=self.mode, length_ranges=[(1, 1)])\n else:\n sv = Structural_Variant(sv_type=sv_config[\"type\"], mode=self.mode,\n length_ranges=sv_config[\"length_ranges\"], source=sv_config[\"source\"],\n target=sv_config[\"target\"],\n overlap_event=(repeat_elt + (retrieved_type if elt_type in ['ALL', None] else elt_type,) if repeat_elt is not None else None),\n div_prob=(None if 'divergence_prob' not in sv_config.keys() else sv_config['divergence_prob']))\n\n # For divergent repeat simulation, need div_dDUP to be homozygous\n if self.sim_settings.get(\"homozygous_only\", False) or random.randint(0, 1):\n sv.ishomozygous = Zygosity.HOMOZYGOUS\n sv.hap = [True, True]\n else:\n sv.ishomozygous = Zygosity.HETEROZYGOUS\n sv.hap = random.choice([[True, False], [False, True]])\n\n self.svs.append(sv)\n if not self.sim_settings[\"prioritize_top\"]:\n random.shuffle(self.svs)\n else: # mode == \"fixed\"\n self.process_vcf(self.vcf_path)\n\n def produce_variant_genome(self, fasta1_out, fasta2_out, ins_fasta, bedfile, stats_file=None, initial_reset=True,\n verbose=False, export_to_file=True):\n \"\"\"\n initial_reset: boolean to indicate if output file should be overwritten (True) or 
appended to (False)\n stats_file: whether a stats file summarizing SVs simulated will be generated in same directory the reference genome is located in\n \"\"\"\n global time_start\n if initial_reset:\n utils.reset_file(fasta1_out)\n utils.reset_file(fasta2_out)\n ref_fasta = self.ref_fasta\n self.apply_transformations(ref_fasta)\n print(\"Finished SV placements and transformations in {} seconds\".format(time.time() - time_start))\n time_start = time.time()\n active_svs = [sv for sv in self.svs if sv.active]\n print(\"Starting Export Process...\")\n for x in range(2):\n edits_dict = dict()\n for id in self.order_ids:\n edits_dict[id] = []\n if x == 0:\n fasta_out = fasta1_out\n elif x == 1:\n fasta_out = fasta2_out\n for sv in active_svs:\n if sv.hap[x]:\n for frag in sv.changed_fragments:\n edits_dict[frag[0]].append(frag[1:])\n for id in edits_dict:\n edits_dict[id].sort()\n self.event_ranges[id].sort()\n self.log_to_file(\"Event Ranges: {}\".format(self.event_ranges))\n self.log_to_file(\"Intervals for hap {}: {}\".format(x, edits_dict))\n for id in self.order_ids:\n edits_x = edits_dict[id]\n utils.fail_if_any_overlapping(edits_x)\n self.formatter.export_variants_to_fasta(id, edits_x, fasta_out, ref_fasta, verbose=verbose)\n print(\"ID {} exported to fasta file {} in {} seconds\".format(id, fasta_out, time.time() - time_start))\n time_start = time.time()\n if export_to_file:\n self.formatter.export_to_bedpe(active_svs, bedfile, ins_fasta=ins_fasta, reset_file=initial_reset)\n self.formatter.export_to_vcf(active_svs, self.stats, vcffile=bedfile[:-4]+'.vcf')\n if stats_file:\n self.stats.get_info(self.svs)\n self.stats.export_data(stats_file)\n\n def choose_rand_pos(self, svs, ref_fasta, verbose=False):\n \"\"\"\n randomly positions SVs and stores reference fragments in SV events\n\n svs: list of Structural Variant objects\n ref_fasta: FastaFile with access to reference file\n \"\"\"\n active_svs_total = 0\n inactive_svs_total = 0\n time_start_local = time.time()\n for sv in svs:\n tries = 0\n valid = False\n while not valid:\n tries += 1\n valid = True\n if tries > self.sim_settings[\"max_tries\"]:\n if self.sim_settings[\"fail_if_placement_issues\"]:\n raise Exception(\n \"Failed to simulate {}, {} / {} SVs successfully simulated (set fail_if_placement_issues \"\n \"to False to override placement failures)\".format(\n sv, active_svs_total, len(svs)))\n valid = False\n break\n rand_id, chr_len, chr_event_ranges = self.get_rand_chr(check_size=sv.req_space,\n fixed_chrom=(None if sv.overlap_event is None\n else sv.overlap_event[0]))\n if not (sv.dispersion_flip and sv.overlap_event is not None):\n # if an overlap event is given, need to find the SV start position based on which fragment has been\n # set to the overlap event interval\n if sv.overlap_event is not None:\n start_pos = 0\n for frag in sv.source_events[::-1]:\n if frag.start is not None:\n start_pos = frag.start\n else:\n start_pos -= frag.length\n else:\n start_pos = random.randint(0, chr_len - sv.req_space)\n # define the space in which SV operates\n new_intervals = [] # tracks new ranges of blocks\n sv.start, sv.start_chr = start_pos, rand_id\n sv.end = sv.start + sv.req_space\n block_start = sv.start\n else:\n # to assign event \"A\" to a repeat interval in a flipped dispersion event, need to\n # anchor the sv to the end of \"A\" and get the start position by subtracting off the total size\n end_pos = int(sv.overlap_event[2])\n start_pos = end_pos - sv.req_space\n new_intervals = []\n sv.start, sv.start_chr = start_pos, 
rand_id\n sv.end = end_pos\n block_start = sv.start\n\n for sv_event in sv.source_events:\n sv_event.start, sv_event.end = start_pos, start_pos + sv_event.length\n sv_event.source_chr = rand_id\n frag = ref_fasta.fetch(rand_id, sv_event.start, sv_event.end)\n sv_event.source_frag = frag\n start_pos += sv_event.length\n\n if sv_event.symbol.startswith(Symbols.DIS.value):\n if utils.is_overlapping(chr_event_ranges, (block_start, sv_event.start)):\n valid = False\n break\n new_intervals.append((block_start, sv_event.start))\n block_start = sv_event.end\n elif utils.percent_N(frag) > 0.05:\n valid = False\n break\n # catches the last (and perhaps only) block in sequence\n if utils.is_overlapping(chr_event_ranges, (block_start, sv.end)):\n valid = False\n continue\n else:\n new_intervals.append((block_start, sv.end))\n\n # adds new SV to simulate only if chosen positions were valid\n if valid:\n active_svs_total += 1\n sv.active = True\n self.log_to_file(\"Intervals {} added to Chromosome \\\"{}\\\" for SV {}\".format(new_intervals, rand_id, sv))\n chr_event_ranges.extend(new_intervals)\n # populates insertions with random sequence - these event symbols only show up in target transformation\n for event in sv.events_dict.values():\n if event.source_frag is None and event.length > 0:\n event.source_frag = utils.generate_seq(event.length)\n sv.assign_locations(sv.start)\n else:\n inactive_svs_total += 1\n if tries != self.sim_settings[\"max_tries\"] + 1:\n self.log_to_file(\"{} only got {} tries instead of the max {}\".format(sv, tries, self.sim_settings[\n \"max_tries\"] + 1), key=\"WARNING\")\n\n time_dif = time.time() - time_start_local\n print(\n \"{} / {} SVs successfully placed ========== {} / {} SVs unsuccessfully placed, {} tries, {} seconds\".format(\n active_svs_total, len(svs), inactive_svs_total, len(svs), tries, time_dif), end=\"\\r\")\n time_start_local = time.time()\n\n def apply_transformations(self, ref_fasta):\n \"\"\"\n Randomly chooses positions for all SVs and carries out all edits\n Populates event classes within SVs with reference fragments and start & end positions\n Stores list of changes, which each have an interval and a sequence to substitute the reference frag with, in SV\n\n ref_fasta: FastaFile with access to reference\n mode: flag indicating whether we're adding SVs to the reference in a randomized or deterministic way\n \"\"\"\n if self.mode == \"randomized\":\n # select random positions for SVs\n self.choose_rand_pos(self.svs, ref_fasta)\n print()\n\n total = 0\n for sv in self.svs:\n if sv.active:\n sv.change_fragment()\n total += 1\n self.log_to_file(\"Events Dict after all edits: {} \".format(sv.events_dict))\n\n def close(self):\n self.ref_fasta.close()" }, { "identifier": "FormatterIO", "path": "insilicosv/processing.py", "snippet": "class FormatterIO:\n def __init__(self, par_file):\n self.bedpe_counter = 1\n self.par_file = par_file\n self.config = None\n\n @staticmethod\n def run_checks_randomized(config):\n \"\"\"\n check method for yaml given with SVs given for randomized placement on reference\n \"\"\"\n config_svs = config['variant_sets']\n for config_sv in config_svs:\n if \"avoid_intervals\" in config_sv:\n continue\n elif \"type\" not in config_sv:\n raise Exception(\"\\\"Type\\\" attribute must be specified! 
For custom transformations, enter in \\\"Custom\\\"\")\n elif config_sv[\"type\"] == \"SNP\": # SNP events are only specified by count (size is deterministic)\n if \"number\" in config_sv and isinstance(config_sv[\"number\"], int) and config_sv[\"number\"] > 0:\n continue\n else:\n raise Exception(\"Number (of type int > 0) is a required parameter for all SVs\")\n if \"min_length\" not in config_sv:\n raise Exception(\"Min length must be specified on all SVs!\")\n if \"max_length\" not in config_sv:\n raise Exception(\"Max length must be specified on all SVs!\")\n if \"number\" not in config_sv:\n raise Exception(\"Number is a required parameter for all SVs\")\n\n elif \"type\" in config_sv and not isinstance(config_sv[\"type\"], str):\n raise Exception(\"Invalid {} type for SV \\'type\\' attribute, str expected\".format(type(config_sv[\"type\"])))\n valid_optional_par = [\"fail_if_placement_issues\", \"max_tries\", \"generate_log_file\", \"filter_small_chr\",\n \"prioritize_top\", \"homozygous_only\", \"reference\"] # valid arguments within sim_settings\n for parameter in config['sim_settings']:\n if parameter not in valid_optional_par:\n raise Exception(\"\\\"{}\\\" is an invalid argument under sim_settings\".format(parameter))\n valid_keys = [\"sim_settings\", \"variant_sets\", \"overlap_events\", \"avoid_intervals\"] # valid arguments at the top level\n for key in config:\n if key not in valid_keys:\n raise Exception(\"Unknown argument \\\"{}\\\"\".format(key))\n\n def postproc_config_dict(self):\n if 'sim_settings' not in self.config.keys():\n raise Exception(\"Must include \\'sim_settings\\' sections specifying at least \\'reference\\' path\")\n if \"filter_small_chr\" in self.config.keys() and not isinstance(self.config[\"filter_small_chr\"], int):\n raise Exception(\"Must provide value of type int to \\'filter_small_chr\\'\")\n if \"reference\" not in self.config[\"sim_settings\"]:\n raise Exception(\"Must include reference FASTA file in \\'reference\\' field of \\'sim_settings\\'\")\n elif self.config[\"sim_settings\"][\"reference\"].split(\".\")[-1] not in [\"fa\", \"fna\", \"fasta\"]:\n raise Exception(\"Input reference must be of type .fa, .fna, or .fasta\")\n if \"vcf_path\" not in self.config[\"variant_sets\"][0]:\n self.run_checks_randomized(self.config)\n for config_sv in self.config['variant_sets']:\n if \"vcf_path\" in config_sv:\n continue\n # SV event length specification - not applicable for SNPs\n if config_sv[\"type\"] != \"SNP\":\n if not isinstance(config_sv[\"min_length\"], list) or not isinstance(config_sv[\"max_length\"], list):\n raise Exception(\"Must provide entries of type list to \\'min_length\\' and \\'max_length\\'\")\n else:\n config_sv[\"length_ranges\"] = list(zip(config_sv[\"min_length\"], config_sv[\"max_length\"]))\n assert all(max_len >= min_len >= 0 for (min_len, max_len) in config_sv[\"length_ranges\"]), \"Max length must be >= min length for all SVs! 
Also ensure that all length values are >= 0.\"\n if \"divergence_prob\" in config_sv:\n if config_sv[\"type\"] != \"DIVERGENCE\":\n raise Exception(\"divergence_prob can only be given for event type DIVERGENCE\")\n else:\n assert isinstance(config_sv[\"divergence_prob\"], int) or isinstance(config_sv[\"divergence_prob\"], float), \\\n \"Must give \\'divergence_prob\\'\"\n assert 1 >= config_sv[\"divergence_prob\"] > 0, \"divergence_prob must be in (0,1]\"\n\n config_sv[\"type\"] = Variant_Type(config_sv[\"type\"])\n if config_sv[\"type\"] != Variant_Type.Custom:\n config_sv[\"source\"] = None\n config_sv[\"target\"] = None\n\n # setting default values for sim_settings fields\n if 'max_tries' not in self.config['sim_settings']:\n self.config['sim_settings']['max_tries'] = 50\n if 'fail_if_placement_issues' not in self.config['sim_settings']:\n self.config['sim_settings']['fail_if_placement_issues'] = False\n\n def yaml_to_var_list(self):\n try:\n with open(self.par_file) as yaml_file:\n self.config = yaml.full_load(yaml_file)\n except:\n raise Exception(\"YAML File {} failed to be open\".format(self.par_file))\n self.postproc_config_dict()\n\n def write_to_file(self, sv, bedfile, source_s, source_e, target_s, target_e, transform, event, sv_id):\n assert (not event.symbol.startswith(Symbols.DIS.value))\n if transform == Operations.INS.value:\n transform_length = event.length\n else:\n transform_length = source_e - source_s\n if event.length > 0:\n with open(bedfile, \"a\") as fout:\n row = [str(event.source_chr),\n str(source_s),\n str(source_e),\n str(event.source_chr),\n str(target_s),\n str(target_e),\n transform,\n str(transform_length),\n '%d/%d' % (int(sv.hap[0]), int(sv.hap[1])),\n sv.name,\n str(sv_id)]\n fout.write(\"\\t\".join(row) + \"\\n\")\n\n @staticmethod\n def symbol_is_inversion(symbol):\n return any(c.islower() for c in symbol)\n\n @staticmethod\n def export_insertions(chr, start_pos, seq, ins_fasta):\n \"\"\"\n Exports foreign insertion sequences to separate fasta file, append only\n \"\"\"\n with open(ins_fasta, \"a\") as fout_ins:\n fout_ins.write(\">{}_{}\\n\".format(chr, start_pos))\n fout_ins.write(\"{}\\n\".format(seq))\n\n @staticmethod\n def get_event_target_operation(ev, target_events_dict, source_events_dict):\n \"\"\"\n determines target interval and operation for multi-source events\n \"\"\"\n # A -> A'\n if ev + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), \\\n Operations.DUP.value if ev in target_events_dict.keys() else Operations.TRA.value\n # A -> a'\n elif ev.lower() + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev.lower() + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INVDUP.value\n # A -> a\n elif ev.lower() in target_events_dict.keys():\n trg_sym = ev.lower()\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INV.value\n # A -> A* (in the case of a custom event in which an event is divergently duplicated)\n elif ev + Symbols.DIV.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DIV.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.DIV.value\n # A -> A (insertion if source A is undefined, identity otherwise)\n elif ev in target_events_dict.keys():\n return (target_events_dict[ev].start, target_events_dict[ev].end), \\\n Operations.INS.value if 
source_events_dict[ev].start is None else Operations.IDENTITY.value\n # A -> [none]\n elif ev not in [sym[0] for sym in target_events_dict.keys()]:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.DEL.value\n # otherwise unknown mapping\n else:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.UNDEFINED.value\n\n @staticmethod\n def postprocess_record_params(sv, sv_record_info):\n \"\"\"\n arrange the bed_record parameter dictionaries in order of ascending source interval start position\n and assign order values to the relevant entries\n \"\"\"\n # for TRA/INS/DUP events with the same target position, 'order' describes the order in which they\n # are compiled (i.e., the order in which they appear in the target sequence)\n order = 0\n ins_pos = None\n for block in sv.target_symbol_blocks:\n for target_event in block:\n if target_event.symbol.startswith(Symbols.DIS.value) or \\\n target_event.symbol in sv_record_info.keys(): # <- prevent collision with A' and A if both in target\n continue\n src_sym = target_event.symbol[0].upper()\n if sv_record_info[src_sym]['transform'] in NONZERO_ORDER_OPERATIONS:\n if ins_pos is None:\n ins_pos = sv_record_info[src_sym]['target_s']\n order += 1\n elif sv_record_info[src_sym]['target_s'] == ins_pos:\n order += 1\n else:\n ins_pos = None\n order = 0\n # sv_record_info[src_sym]['order'] = order\n return sorted([params for params in sv_record_info.values()], key=lambda params: params['source_s'])\n\n def export_to_bedpe(self, svs, bedfile, ins_fasta=None, reset_file=True):\n if reset_file:\n utils.reset_file(bedfile)\n if ins_fasta:\n utils.reset_file(ins_fasta)\n for sv_id, sv in enumerate(svs):\n # SVs with multiple source events will be split into multiple bed records (one for each)\n if len(sv.events_dict) == 1:\n ev = list(sv.sv_blocks.target_events_dict.values())[0] if sv.type == Variant_Type.INS\\\n else list(sv.events_dict.values())[0]\n op = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)[1]\n record_info = {'source_s': ev.start, 'source_e': ev.end, 'target_s': ev.start, 'target_e': ev.end,\n 'transform': op, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n self.write_to_file(**record_info)\n if op == Operations.INS.value:\n self.export_insertions(sv.start_chr, ev.start, ev.source_frag, ins_fasta)\n else:\n # multiple source events: source intervals taken from the source events\n # and target intervals taken from corresponding target events (if no match, then deletion)\n sv_record_info = {}\n for ev in sv.events_dict.values():\n if ev.symbol.startswith(Symbols.DIS.value):\n continue\n sv_record_info[ev.symbol] = {'source_s': ev.start, 'source_e': ev.end, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n (target_s, target_e), operation = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)\n sv_record_info[ev.symbol]['target_s'] = target_s\n sv_record_info[ev.symbol]['target_e'] = target_e\n sv_record_info[ev.symbol]['transform'] = operation\n for param_dict in self.postprocess_record_params(sv, sv_record_info):\n self.write_to_file(**param_dict)\n\n def export_to_vcf(self, svs, stats, vcffile):\n with open(vcffile, \"w\") as vcf:\n vcf.write(\"##fileformat=VCFv4.2\\n\")\n for chrm, chrm_len in stats.chr_lengths.items():\n vcf.write(\"##contig=<ID=%s,length=%d>\\n\" % (chrm, chrm_len))\n vcf.write(\"#%s\\n\" % \"\\t\".join([\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", 
\"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\",\n \"SAMPLE\"]))\n # *** This will throw an error with pysam version 0.18, need 0.16.0.1\n vcf_file = pysam.VariantFile(vcffile)\n vcf_file.header.info.add('END', number=1, type='Integer', description=\"End position of the variant \"\n \"described in this record\")\n vcf_file.header.info.add('CIPOS', number=2, type='Integer', description=\"Confidence interval around POS for \"\n \"imprecise variants\")\n vcf_file.header.info.add('CIEND', number=2, type='Integer', description=\"Confidence interval around END for \"\n \"imprecise variants\")\n vcf_file.header.info.add('SVTYPE', number=1, type='String', description=\"Type of structural variant\")\n vcf_file.header.info.add('SVLEN', number=1, type='Integer', description=\"Length of structural variant\")\n vcf_file.header.info.add('SVMETHOD', number=1, type='String', description=\"SV detection method\")\n vcf_file.header.info.add('TARGET', number=1, type='Integer', description=\"Target location for divergent repeat\")\n vcf_file.header.info.add('OVERLAP_EV', number=1, type='String', description=\"Bool. indicator for the event being\"\n \"placed at an overlap_events interval\")\n vcf_file.header.formats.add('GT', number=1, type='String', description=\"Genotype\")\n\n vcf_out_file = pysam.VariantFile(vcffile, 'w', header=vcf_file.header)\n\n for sv in svs:\n zyg = (int(sv.hap[0]), int(sv.hap[1]))\n dispersion_target = None\n if sv.type in DISPERSION_TYPES:\n source_event = sv.events_dict[Symbols.REQUIRED_SOURCE.value]\n disp_event = sv.events_dict['_1']\n rec_start = source_event.start\n rec_end = source_event.end\n if disp_event.start == source_event.end:\n dispersion_target = disp_event.end\n else:\n dispersion_target = disp_event.start\n else:\n rec_start = min([frag[1] for frag in sv.changed_fragments])\n rec_end = max(frag[2] for frag in sv.changed_fragments)\n if dispersion_target is not None:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start, 'TARGET': dispersion_target}\n else:\n if sv.type == Variant_Type.INS:\n # special case of simple INS: sv length \\neq (sv end - sv start)\n # **pysam will delete END fields that are equal to POS, therefore INS records won't have an END\n rec_end += 1\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': sv.events_dict[Symbols.REQUIRED_SOURCE.value].length}\n else:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start}\n if sv.overlap_event is not None:\n info_field['OVERLAP_EV'] = sv.overlap_event[3]\n\n vcf_record = vcf_out_file.header.new_record(contig=sv.start_chr, start=rec_start, stop=rec_end,\n alleles=['N', '<%s>' % sv.type.value], id=sv.type.value,\n info=info_field,\n qual=100, filter='PASS',\n samples=[{'GT': zyg}])\n vcf_out_file.write(vcf_record)\n\n vcf_out_file.close()\n\n def export_variants_to_fasta(self, id, edits, fasta_out, fasta_file, verbose=False):\n \"\"\"\n Exports list of changes from simulator to fasta file\n\n id: chr_id to apply edits to\n edits: list with elements of the form (start, end, new_frag)\n fasta_out: Fasta file to export changes to\n fasta_file: FastaFile with access to reference\n \"\"\"\n with open(fasta_out, \"a\") as fout_export:\n if id not in fasta_file.references:\n raise KeyError(\"ID {} not found in inputted fasta file\".format(id))\n if verbose:\n print(\"New ID: \", id)\n fout_export.write(\">\" + str(id) + \"\\n\")\n chr_variants = list(edits)\n chr_variants.sort()\n chr_variants.append([fasta_file.get_reference_length(id), fasta_file.get_reference_length(id), \"\"])\n pos = 
0\n for variant in chr_variants:\n var_start, var_end = variant[0], variant[1]\n while pos < var_start:\n appropriate_buffer = MAX_BUFFER_SIZE if var_start - pos > MAX_BUFFER_SIZE else var_start - pos\n c = fasta_file.fetch(id, pos, pos + appropriate_buffer)\n fout_export.write(c)\n pos += appropriate_buffer\n assert (pos == var_start), \"Replacement fragment about to be inserted at position {} instead of var_start {}\".format(pos, var_start)\n fout_export.write(variant[2])\n pos = var_end\n fout_export.write(\"\\n\")\n\n def close(self):\n self.fin_export1.close()\n self.fin_export2.close()" }, { "identifier": "NestedDict", "path": "insilicosv/utils.py", "snippet": "class NestedDict(defaultdict):\n def __call__(self):\n return NestedDict(self.default_factory)" }, { "identifier": "utils", "path": "insilicosv/utils.py", "snippet": "class NestedDict(defaultdict):\nclass OverlapEvents:\n def __call__(self):\ndef is_overlapping(event_ranges, addition, called_from_helper=False, strictly_partial=False):\ndef fail_if_any_overlapping(arr):\ndef validate_symbols(source, target):\ndef remove_file(file):\ndef reset_file(filename):\ndef generate_seq(length):\ndef percent_N(seq):\ndef complement(seq):\ndef divergence(seq, divergence_prob=None):\ndef get_sv_config_identifier(sv_config):\n def __init__(self, config, allow_chroms=None):\n def get_num_overlap_counts(self, config):\n def parse_bed_file(self, bed_fname, allow_chroms=None, allow_types=None):\n def get_single_element_interval(self, sv_config_id, sv_config, partial_overlap):\n def populate_alu_pairs(self, svs_config):\n def get_alu_mediated_interval(self, sv_config_id):\n def remove_alu_from_overlap_dict(self, chrom, start, end):\n def midpoint(start, end):\n def get_intrvl_len(chr, st, end):\n def elt_type_is_allowed(self, elt_type):\n def get_partially_overlapping_interval(elt_chrom, elt_start, elt_stop, sv_min, sv_max):\n def draw_from_unif(a, b):\n def decrement_counts(self, sv_config_id, input_elt_type, partial_overlap):\n def __getitem__(self, sv_config_id, minsize, maxsize, elt_type=None, partial_overlap=False):" }, { "identifier": "constants", "path": "insilicosv/constants.py", "snippet": "MAX_BUFFER_SIZE: int = 1000000 # max number of bases that can be read at one time to export to fasta file\n INS = \"INS\"\n DEL = \"DEL\"\n INV = \"INV\"\n DUP = \"DUP\"\n SNP = \"SNP\"\n TRA = \"TRA\"\n DIVERGENCE = \"DIVERGENCE\"\nDISPERSION_TYPES = [Variant_Type.dDUP, Variant_Type.INV_dDUP,\n Variant_Type.TRA, Variant_Type.div_dDUP,\n Variant_Type.dDUP_iDEL, Variant_Type.INS_iDEL]\n INS = \"INS\"\n DUP = \"DUP\"\n INV = \"INV\"\n DEL = \"DEL\"\n TRA = \"TRA\"\n INVDUP = \"INVDUP\"\n INVTRA = \"INVTRA\"\n IDENTITY = \"IDENTITY\"\n UNDEFINED = \"UNDEFINED\"\n DIV = \"DIV\"\nNONZERO_ORDER_OPERATIONS = [Operations.TRA.value, Operations.INS.value, Operations.DUP.value, Operations.INVDUP.value,\n Operations.INVTRA.value, Operations.DIV.value]\n UNDEFINED = -1\n HOMOZYGOUS = 1\n HETEROZYGOUS = 0\n DIS = \"_\" # dispersion event\n DUP = \"'\" # attached to symbols that are not the original one from source sequence\n DIV = \"*\" # divergent interval, attached to symbols that vary from the original by low-probability base error\n REQUIRED_SOURCE = \"A\" # event symbol of the required source/main event all SVs must have\nSV_KEY = {Variant_Type.INS: [(), (\"A\")],\n Variant_Type.SNP: [(\"A\",), (\"A*\",)],\n Variant_Type.DEL: [(\"A\",), ()],\n Variant_Type.INV: [(\"A\",), (\"a\",)],\n Variant_Type.DUP: [(\"A\",), (\"A\", \"A'\")],\n Variant_Type.TRA: [(\"A\", 
\"_\"), (\"_\", \"A'\")],\n Variant_Type.dupINVdup: [(\"A\", \"B\", \"C\"), (\"A\", \"c'\", \"b\", \"a'\", \"C\")],\n Variant_Type.delINVdel: [(\"A\", \"B\", \"C\"), (\"b\",)],\n Variant_Type.delINVdup: [(\"A\", \"B\", \"C\"), (\"c'\", \"b\", \"C\")],\n Variant_Type.dupINVdel: [(\"A\", \"B\", \"C\"), (\"A\", \"b\", \"a'\")],\n Variant_Type.delINV: [(\"A\", \"B\"), (\"b\",)],\n Variant_Type.INVdel: [(\"A\", \"B\"), (\"a\",)],\n Variant_Type.dDUP_iDEL: [(\"A\", \"_\", \"B\"), (\"A\", \"_\", \"A'\")],\n Variant_Type.INS_iDEL: [(\"A\", \"_\", \"B\"), (\"_\", \"A'\")],\n Variant_Type.INVdup: [(\"A\",), (\"a\", \"a'\")],\n Variant_Type.dup_INV: [(\"A\", \"B\"), (\"A\", \"b\", \"a'\")],\n Variant_Type.INV_dup: [(\"A\", \"B\"), (\"b'\", \"a\", \"B\")],\n Variant_Type.dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"A'\")],\n Variant_Type.INV_dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"a'\")],\n Variant_Type.div_dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"A*\")],\n Variant_Type.DIVERGENCE: [(\"A\",), (\"A*\",)]}\nDEFAULT_CONFIG = {\"sim_settings\": {\"max_tries\": 100,\n \"fail_if_placement_issues\": False,\n \"generate_log_file\": False,\n \"prioritize_top\": False},\n \"variant_sets\": {}}\nclass Variant_Type(Enum):\nclass Operations(Enum):\nclass Zygosity(Enum):\nclass Symbols(Enum):" } ]
from insilicosv.simulate import SV_Simulator from insilicosv.processing import FormatterIO from test_simulate import TestObject from pysam import VariantFile, FastaFile from collections import defaultdict, Counter from insilicosv.utils import NestedDict from insilicosv import utils from insilicosv import constants import unittest import sys import os
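Of the helpers imported above, NestedDict is the least self-describing; a small self-contained illustration (not part of the record) of the pattern, with the class body copied from the context snippet and made-up keys:

from collections import defaultdict

class NestedDict(defaultdict):
    # Mirrors insilicosv.utils.NestedDict: an instance is itself callable, so it can act as
    # the default_factory of an outer NestedDict, giving nested autovivifying dictionaries.
    def __call__(self):
        return NestedDict(self.default_factory)

counts = NestedDict(NestedDict(int))
counts["chr21"]["DEL"] += 1  # the inner dict and the int counter are created on first access
counts["chr21"]["DEL"] += 1
print(counts["chr21"]["DEL"])  # -> 2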
14130
self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], 
"max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.formatter = FormatterIO(self.par) def tearDown(self):
class TestProcObject(TestObject): def __init__(self, ref, par, hap1, hap2, bed, vcf): self.vcf = vcf super().__init__(ref, par, hap1, hap2, bed) def extract_bed_records(self): # parse bed record into dict for easy comparison # --> example split bed record: ['chr19', '0', '3', 'chr19', '0', '3', 'DEL', '3', '1/1', 'DEL', '1'] bed_records = [] with open(self.bed) as f: for line in f: ln = line.split() bed_record = {'source_chr': ln[0], 'source_s': ln[1], 'source_e': ln[2], 'target_chr': ln[3], 'target_s': ln[4], 'target_e': ln[5], 'ev_type': ln[6], 'len': ln[7], 'zyg': ln[8], 'parent_type': ln[9], 'sv_id': ln[10]} bed_records.append(bed_record) return bed_records def extract_vcf_records(self): vcf_records = [] vcf = VariantFile(self.vcf) for rec in vcf.fetch(): ln = str(rec).split() # separately parse info field of the form: 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738' info = {field.split('=')[0]: field.split('=')[1] for field in ln[7].split(';')} vcf_record = {'CHROM': ln[0], 'POS': ln[1], 'ID': ln[2], 'REF': ln[3], 'ALT': ln[4], 'QUAL': ln[5], 'FILTER': ln[6], 'INFO': info, 'FORMAT': ln[8], 'SAMPLE': ln[9]} vcf_records.append(vcf_record) return vcf_records class TestProcessing(unittest.TestCase): def setUp(self): # runs before every test self.ref_file = "test/inputs/test.fa" self.par = "test/inputs/par.yaml" self.hap1 = "test/inputs/test1.fa" self.hap2 = "test/inputs/test2.fa" self.bed = "test/inputs/out.bed" self.vcf = "test/inputs/out.vcf" self.ins_fasta = "test/inputs/ins_fasta.fa" self.test_overlap_bed = "test/inputs/example_overlap_events.bed" self.test_overlap_bed_2 = "test/inputs/example_overlap_events_2.bed" # test_overlap_bed_3: events with differing chromosome self.test_overlap_bed_3 = "test/inputs/example_overlap_events_3.bed" self.test_overlap_bed_4 = "test/inputs/example_overlap_events_4.bed" self.test_overlap_bed_11 = "test/inputs/example_overlap_events_11.bed" self.test_objects_simple_events = {'DEL': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DEL", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'DUP': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DUP", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INV", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS': TestProcObject([self.ref_file, {"chr19": "C"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INS", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_flanked_inversions = {'dupINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": 
self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'dupINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dispersions = {'dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'TRA': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "TRA", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_del_inv = {'delINV': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINV", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdel", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_idel = {'dDUP_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INS_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dup_inv = {'dup_INV': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dup_INV", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dup': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dup", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_INVdup = {'INVdup': 
TestProcObject([self.ref_file, {"chr19": "ACTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 1, "max_length": [4], "min_length": [4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_multievent = {'INVdup': TestProcObject([self.ref_file, {"chr19": "ACTGCTAATGCGTTCACTGCTAATGCGTTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 200, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 3, "max_length": [4], "min_length": [2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 
1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.formatter = FormatterIO(self.par) def tearDown(self):
utils.remove_file(self.ins_fasta)
3
2023-12-01 14:39:20+00:00
16k
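The `TestProcObject.extract_vcf_records` helper shown in the row above parses each VCF INFO column (its comment gives the format 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738') with a split-based dict comprehension. A minimal standalone sketch of that parsing pattern, reusing the format string from that comment (the values are illustrative only):

info_field = "END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738"

# Same pattern as in extract_vcf_records: split the column on ';', then each field on '='.
info = {field.split("=")[0]: field.split("=")[1] for field in info_field.split(";")}

assert info == {"END": "45590417", "SVTYPE": "dDUP", "SVLEN": "539", "TARGET": "45581738"}
print(info["SVTYPE"], info["SVLEN"])  # dDUP 539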
BiQiWHU/BWG
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = 
T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert 
self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "mask2former/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "mask2former/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
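The `SemanticSegmentorWithTTA` snippet above flips predictions coming from horizontally flipped inputs back along the width axis before averaging them with the other augmented predictions. A tiny self-contained sketch of that flip-back-and-average step on dummy tensors (shapes and values are illustrative, not taken from the model):

import torch

# Two fake per-augmentation semantic-segmentation scores of shape (num_classes, H, W);
# pretend the second came from a horizontally flipped copy of the image.
pred_plain = torch.rand(3, 4, 5)
pred_from_hflip = torch.rand(3, 4, 5)

# Undo the horizontal flip along the width axis (dims=[2]), as _inference_one_image does,
# then average the accumulated predictions.
final = (pred_plain + pred_from_hflip.flip(dims=[2])) / 2
print(final.shape)  # torch.Size([3, 4, 5])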
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11304
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
3
2023-11-29 17:15:46+00:00
16k
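The cropped `train_net.py` in the row above stops inside `Trainer.build_train_loader`, right after the `mask_former_instance` branch header, and the completion recorded for it is `mapper = MaskFormerInstanceDatasetMapper(cfg, True)`. A condensed sketch of how that branch plausibly continues, mirroring the semantic and panoptic branches already visible in the snippet (an assumption about the rest of the method, not the repository's verbatim code):

from detectron2.data import build_detection_train_loader
from mask2former import (
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
)

def build_train_loader_sketch(cfg):
    # Condensed, hypothetical stand-in for Trainer.build_train_loader from the row above.
    name = cfg.INPUT.DATASET_MAPPER_NAME
    if name == "mask_former_semantic":
        mapper = MaskFormerSemanticDatasetMapper(cfg, True)
    elif name == "mask_former_panoptic":
        mapper = MaskFormerPanopticDatasetMapper(cfg, True)
    elif name == "mask_former_instance":
        mapper = MaskFormerInstanceDatasetMapper(cfg, True)  # the recorded completion line
    else:
        # Assumed fallback: let detectron2 build its default mapper.
        return build_detection_train_loader(cfg)
    return build_detection_train_loader(cfg, mapper=mapper)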
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/tacotron.py
[ { "identifier": "CapacitronVAE", "path": "TTS/tts/layers/tacotron/capacitron_layers.py", "snippet": "class CapacitronVAE(nn.Module):\n \"\"\"Effective Use of Variational Embedding Capacity for prosody transfer.\n\n See https://arxiv.org/abs/1906.03402\"\"\"\n\n def __init__(\n self,\n num_mel,\n capacitron_VAE_embedding_dim,\n encoder_output_dim=256,\n reference_encoder_out_dim=128,\n speaker_embedding_dim=None,\n text_summary_embedding_dim=None,\n ):\n super().__init__()\n # Init distributions\n self.prior_distribution = MVN(\n torch.zeros(capacitron_VAE_embedding_dim), torch.eye(capacitron_VAE_embedding_dim)\n )\n self.approximate_posterior_distribution = None\n # define output ReferenceEncoder dim to the capacitron_VAE_embedding_dim\n self.encoder = ReferenceEncoder(num_mel, out_dim=reference_encoder_out_dim)\n\n # Init beta, the lagrange-like term for the KL distribution\n self.beta = torch.nn.Parameter(torch.log(torch.exp(torch.Tensor([1.0])) - 1), requires_grad=True)\n mlp_input_dimension = reference_encoder_out_dim\n\n if text_summary_embedding_dim is not None:\n self.text_summary_net = TextSummary(text_summary_embedding_dim, encoder_output_dim=encoder_output_dim)\n mlp_input_dimension += text_summary_embedding_dim\n if speaker_embedding_dim is not None:\n # TODO: Test a multispeaker model!\n mlp_input_dimension += speaker_embedding_dim\n self.post_encoder_mlp = PostEncoderMLP(mlp_input_dimension, capacitron_VAE_embedding_dim)\n\n def forward(self, reference_mel_info=None, text_info=None, speaker_embedding=None):\n # Use reference\n if reference_mel_info is not None:\n reference_mels = reference_mel_info[0] # [batch_size, num_frames, num_mels]\n mel_lengths = reference_mel_info[1] # [batch_size]\n enc_out = self.encoder(reference_mels, mel_lengths)\n\n # concat speaker_embedding and/or text summary embedding\n if text_info is not None:\n text_inputs = text_info[0] # [batch_size, num_characters, num_embedding]\n input_lengths = text_info[1]\n text_summary_out = self.text_summary_net(text_inputs, input_lengths).to(reference_mels.device)\n enc_out = torch.cat([enc_out, text_summary_out], dim=-1)\n if speaker_embedding is not None:\n speaker_embedding = torch.squeeze(speaker_embedding)\n enc_out = torch.cat([enc_out, speaker_embedding], dim=-1)\n\n # Feed the output of the ref encoder and information about text/speaker into\n # an MLP to produce the parameteres for the approximate poterior distributions\n mu, sigma = self.post_encoder_mlp(enc_out)\n # convert to cpu because prior_distribution was created on cpu\n mu = mu.cpu()\n sigma = sigma.cpu()\n\n # Sample from the posterior: z ~ q(z|x)\n self.approximate_posterior_distribution = MVN(mu, torch.diag_embed(sigma))\n VAE_embedding = self.approximate_posterior_distribution.rsample()\n # Infer from the model, bypasses encoding\n else:\n # Sample from the prior: z ~ p(z)\n VAE_embedding = self.prior_distribution.sample().unsqueeze(0)\n\n # reshape to [batch_size, 1, capacitron_VAE_embedding_dim]\n return VAE_embedding.unsqueeze(1), self.approximate_posterior_distribution, self.prior_distribution, self.beta" }, { "identifier": "GST", "path": "TTS/tts/layers/tacotron/gst_layers.py", "snippet": "class GST(nn.Module):\n \"\"\"Global Style Token Module for factorizing prosody in speech.\n\n See https://arxiv.org/pdf/1803.09017\"\"\"\n\n def __init__(self, num_mel, num_heads, num_style_tokens, gst_embedding_dim, embedded_speaker_dim=None):\n super().__init__()\n self.encoder = ReferenceEncoder(num_mel, gst_embedding_dim)\n 
self.style_token_layer = StyleTokenLayer(num_heads, num_style_tokens, gst_embedding_dim, embedded_speaker_dim)\n\n def forward(self, inputs, speaker_embedding=None):\n enc_out = self.encoder(inputs)\n # concat speaker_embedding\n if speaker_embedding is not None:\n enc_out = torch.cat([enc_out, speaker_embedding], dim=-1)\n style_embed = self.style_token_layer(enc_out)\n\n return style_embed" }, { "identifier": "Decoder", "path": "TTS/tts/layers/tacotron/tacotron.py", "snippet": "class Decoder(nn.Module):\n \"\"\"Tacotron decoder.\n\n Args:\n in_channels (int): number of input channels.\n frame_channels (int): number of feature frame channels.\n r (int): number of outputs per time step (reduction rate).\n memory_size (int): size of the past window. if <= 0 memory_size = r\n attn_type (string): type of attention used in decoder.\n attn_windowing (bool): if true, define an attention window centered to maximum\n attention response. It provides more robust attention alignment especially\n at interence time.\n attn_norm (string): attention normalization function. 'sigmoid' or 'softmax'.\n prenet_type (string): 'original' or 'bn'.\n prenet_dropout (float): prenet dropout rate.\n forward_attn (bool): if true, use forward attention method. https://arxiv.org/abs/1807.06736\n trans_agent (bool): if true, use transition agent. https://arxiv.org/abs/1807.06736\n forward_attn_mask (bool): if true, mask attention values smaller than a threshold.\n location_attn (bool): if true, use location sensitive attention.\n attn_K (int): number of attention heads for GravesAttention.\n separate_stopnet (bool): if true, detach stopnet input to prevent gradient flow.\n d_vector_dim (int): size of speaker embedding vector, for multi-speaker training.\n max_decoder_steps (int): Maximum number of steps allowed for the decoder. 
Defaults to 500.\n \"\"\"\n\n # Pylint gets confused by PyTorch conventions here\n # pylint: disable=attribute-defined-outside-init\n\n def __init__(\n self,\n in_channels,\n frame_channels,\n r,\n memory_size,\n attn_type,\n attn_windowing,\n attn_norm,\n prenet_type,\n prenet_dropout,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n location_attn,\n attn_K,\n separate_stopnet,\n max_decoder_steps,\n ):\n super().__init__()\n self.r_init = r\n self.r = r\n self.in_channels = in_channels\n self.max_decoder_steps = max_decoder_steps\n self.use_memory_queue = memory_size > 0\n self.memory_size = memory_size if memory_size > 0 else r\n self.frame_channels = frame_channels\n self.separate_stopnet = separate_stopnet\n self.query_dim = 256\n # memory -> |Prenet| -> processed_memory\n prenet_dim = frame_channels * self.memory_size if self.use_memory_queue else frame_channels\n self.prenet = Prenet(prenet_dim, prenet_type, prenet_dropout, out_features=[256, 128])\n # processed_inputs, processed_memory -> |Attention| -> Attention, attention, RNN_State\n # attention_rnn generates queries for the attention mechanism\n self.attention_rnn = nn.GRUCell(in_channels + 128, self.query_dim)\n self.attention = init_attn(\n attn_type=attn_type,\n query_dim=self.query_dim,\n embedding_dim=in_channels,\n attention_dim=128,\n location_attention=location_attn,\n attention_location_n_filters=32,\n attention_location_kernel_size=31,\n windowing=attn_windowing,\n norm=attn_norm,\n forward_attn=forward_attn,\n trans_agent=trans_agent,\n forward_attn_mask=forward_attn_mask,\n attn_K=attn_K,\n )\n # (processed_memory | attention context) -> |Linear| -> decoder_RNN_input\n self.project_to_decoder_in = nn.Linear(256 + in_channels, 256)\n # decoder_RNN_input -> |RNN| -> RNN_state\n self.decoder_rnns = nn.ModuleList([nn.GRUCell(256, 256) for _ in range(2)])\n # RNN_state -> |Linear| -> mel_spec\n self.proj_to_mel = nn.Linear(256, frame_channels * self.r_init)\n # learn init values instead of zero init.\n self.stopnet = StopNet(256 + frame_channels * self.r_init)\n\n def set_r(self, new_r):\n self.r = new_r\n\n def _reshape_memory(self, memory):\n \"\"\"\n Reshape the spectrograms for given 'r'\n \"\"\"\n # Grouping multiple frames if necessary\n if memory.size(-1) == self.frame_channels:\n memory = memory.view(memory.shape[0], memory.size(1) // self.r, -1)\n # Time first (T_decoder, B, frame_channels)\n memory = memory.transpose(0, 1)\n return memory\n\n def _init_states(self, inputs):\n \"\"\"\n Initialization of decoder states\n \"\"\"\n B = inputs.size(0)\n # go frame as zeros matrix\n if self.use_memory_queue:\n self.memory_input = torch.zeros(1, device=inputs.device).repeat(B, self.frame_channels * self.memory_size)\n else:\n self.memory_input = torch.zeros(1, device=inputs.device).repeat(B, self.frame_channels)\n # decoder states\n self.attention_rnn_hidden = torch.zeros(1, device=inputs.device).repeat(B, 256)\n self.decoder_rnn_hiddens = [\n torch.zeros(1, device=inputs.device).repeat(B, 256) for idx in range(len(self.decoder_rnns))\n ]\n self.context_vec = inputs.data.new(B, self.in_channels).zero_()\n # cache attention inputs\n self.processed_inputs = self.attention.preprocess_inputs(inputs)\n\n def _parse_outputs(self, outputs, attentions, stop_tokens):\n # Back to batch first\n attentions = torch.stack(attentions).transpose(0, 1)\n stop_tokens = torch.stack(stop_tokens).transpose(0, 1)\n outputs = torch.stack(outputs).transpose(0, 1).contiguous()\n outputs = outputs.view(outputs.size(0), -1, 
self.frame_channels)\n outputs = outputs.transpose(1, 2)\n return outputs, attentions, stop_tokens\n\n def decode(self, inputs, mask=None):\n # Prenet\n processed_memory = self.prenet(self.memory_input)\n # Attention RNN\n self.attention_rnn_hidden = self.attention_rnn(\n torch.cat((processed_memory, self.context_vec), -1), self.attention_rnn_hidden\n )\n self.context_vec = self.attention(self.attention_rnn_hidden, inputs, self.processed_inputs, mask)\n # Concat RNN output and attention context vector\n decoder_input = self.project_to_decoder_in(torch.cat((self.attention_rnn_hidden, self.context_vec), -1))\n\n # Pass through the decoder RNNs\n for idx, decoder_rnn in enumerate(self.decoder_rnns):\n self.decoder_rnn_hiddens[idx] = decoder_rnn(decoder_input, self.decoder_rnn_hiddens[idx])\n # Residual connection\n decoder_input = self.decoder_rnn_hiddens[idx] + decoder_input\n decoder_output = decoder_input\n\n # predict mel vectors from decoder vectors\n output = self.proj_to_mel(decoder_output)\n # output = torch.sigmoid(output)\n # predict stop token\n stopnet_input = torch.cat([decoder_output, output], -1)\n if self.separate_stopnet:\n stop_token = self.stopnet(stopnet_input.detach())\n else:\n stop_token = self.stopnet(stopnet_input)\n output = output[:, : self.r * self.frame_channels]\n return output, stop_token, self.attention.attention_weights\n\n def _update_memory_input(self, new_memory):\n if self.use_memory_queue:\n if self.memory_size > self.r:\n # memory queue size is larger than number of frames per decoder iter\n self.memory_input = torch.cat(\n [new_memory, self.memory_input[:, : (self.memory_size - self.r) * self.frame_channels].clone()],\n dim=-1,\n )\n else:\n # memory queue size smaller than number of frames per decoder iter\n self.memory_input = new_memory[:, : self.memory_size * self.frame_channels]\n else:\n # use only the last frame prediction\n # assert new_memory.shape[-1] == self.r * self.frame_channels\n self.memory_input = new_memory[:, self.frame_channels * (self.r - 1) :]\n\n def forward(self, inputs, memory, mask):\n \"\"\"\n Args:\n inputs: Encoder outputs.\n memory: Decoder memory (autoregression. If None (at eval-time),\n decoder outputs are used as decoder inputs. 
If None, it uses the last\n output as the input.\n mask: Attention mask for sequence padding.\n\n Shapes:\n - inputs: (B, T, D_out_enc)\n - memory: (B, T_mel, D_mel)\n \"\"\"\n # Run greedy decoding if memory is None\n memory = self._reshape_memory(memory)\n outputs = []\n attentions = []\n stop_tokens = []\n t = 0\n self._init_states(inputs)\n self.attention.init_states(inputs)\n while len(outputs) < memory.size(0):\n if t > 0:\n new_memory = memory[t - 1]\n self._update_memory_input(new_memory)\n\n output, stop_token, attention = self.decode(inputs, mask)\n outputs += [output]\n attentions += [attention]\n stop_tokens += [stop_token.squeeze(1)]\n t += 1\n return self._parse_outputs(outputs, attentions, stop_tokens)\n\n def inference(self, inputs):\n \"\"\"\n Args:\n inputs: encoder outputs.\n Shapes:\n - inputs: batch x time x encoder_out_dim\n \"\"\"\n outputs = []\n attentions = []\n stop_tokens = []\n t = 0\n self._init_states(inputs)\n self.attention.init_states(inputs)\n while True:\n if t > 0:\n new_memory = outputs[-1]\n self._update_memory_input(new_memory)\n output, stop_token, attention = self.decode(inputs, None)\n stop_token = torch.sigmoid(stop_token.data)\n outputs += [output]\n attentions += [attention]\n stop_tokens += [stop_token]\n t += 1\n if t > inputs.shape[1] / 4 and (stop_token > 0.6 or attention[:, -1].item() > 0.6):\n break\n if t > self.max_decoder_steps:\n print(\" | > Decoder stopped with 'max_decoder_steps\")\n break\n return self._parse_outputs(outputs, attentions, stop_tokens)" }, { "identifier": "Encoder", "path": "TTS/tts/layers/tacotron/tacotron.py", "snippet": "class Encoder(nn.Module):\n r\"\"\"Stack Prenet and CBHG module for encoder\n Args:\n inputs (FloatTensor): embedding features\n\n Shapes:\n - inputs: (B, T, D_in)\n - outputs: (B, T, 128 * 2)\n \"\"\"\n\n def __init__(self, in_features):\n super().__init__()\n self.prenet = Prenet(in_features, out_features=[256, 128])\n self.cbhg = EncoderCBHG()\n\n def forward(self, inputs):\n # B x T x prenet_dim\n outputs = self.prenet(inputs)\n outputs = self.cbhg(outputs.transpose(1, 2))\n return outputs" }, { "identifier": "PostCBHG", "path": "TTS/tts/layers/tacotron/tacotron.py", "snippet": "class PostCBHG(nn.Module):\n def __init__(self, mel_dim):\n super().__init__()\n self.cbhg = CBHG(\n mel_dim,\n K=8,\n conv_bank_features=128,\n conv_projections=[256, mel_dim],\n highway_features=128,\n gru_features=128,\n num_highways=4,\n )\n\n def forward(self, x):\n return self.cbhg(x)" }, { "identifier": "BaseTacotron", "path": "TTS/tts/models/base_tacotron.py", "snippet": "class BaseTacotron(BaseTTS):\n \"\"\"Base class shared by Tacotron and Tacotron2\"\"\"\n\n def __init__(\n self,\n config: \"TacotronConfig\",\n ap: \"AudioProcessor\",\n tokenizer: \"TTSTokenizer\",\n speaker_manager: SpeakerManager = None,\n ):\n super().__init__(config, ap, tokenizer, speaker_manager)\n\n # pass all config fields as class attributes\n for key in config:\n setattr(self, key, config[key])\n\n # layers\n self.embedding = None\n self.encoder = None\n self.decoder = None\n self.postnet = None\n\n # init tensors\n self.embedded_speakers = None\n self.embedded_speakers_projected = None\n\n # global style token\n if self.gst and self.use_gst:\n self.decoder_in_features += self.gst.gst_embedding_dim # add gst embedding dim\n self.gst_layer = None\n\n # Capacitron\n if self.capacitron_vae and self.use_capacitron_vae:\n self.decoder_in_features += self.capacitron_vae.capacitron_VAE_embedding_dim # add capacitron embedding dim\n 
self.capacitron_vae_layer = None\n\n # additional layers\n self.decoder_backward = None\n self.coarse_decoder = None\n\n @staticmethod\n def _format_aux_input(aux_input: Dict) -> Dict:\n \"\"\"Set missing fields to their default values\"\"\"\n if aux_input:\n return format_aux_input({\"d_vectors\": None, \"speaker_ids\": None}, aux_input)\n return None\n\n #############################\n # INIT FUNCTIONS\n #############################\n\n def _init_backward_decoder(self):\n \"\"\"Init the backward decoder for Forward-Backward decoding.\"\"\"\n self.decoder_backward = copy.deepcopy(self.decoder)\n\n def _init_coarse_decoder(self):\n \"\"\"Init the coarse decoder for Double-Decoder Consistency.\"\"\"\n self.coarse_decoder = copy.deepcopy(self.decoder)\n self.coarse_decoder.r_init = self.ddc_r\n self.coarse_decoder.set_r(self.ddc_r)\n\n #############################\n # CORE FUNCTIONS\n #############################\n\n @abstractmethod\n def forward(self):\n pass\n\n @abstractmethod\n def inference(self):\n pass\n\n def load_checkpoint(\n self, config, checkpoint_path, eval=False, cache=False\n ): # pylint: disable=unused-argument, redefined-builtin\n \"\"\"Load model checkpoint and set up internals.\n\n Args:\n config (Coqpi): model configuration.\n checkpoint_path (str): path to checkpoint file.\n eval (bool, optional): whether to load model for evaluation.\n cache (bool, optional): If True, cache the file locally for subsequent calls. It is cached under `get_user_data_dir()/tts_cache`. Defaults to False.\n \"\"\"\n state = load_fsspec(checkpoint_path, map_location=torch.device(\"cpu\"), cache=cache)\n self.load_state_dict(state[\"model\"])\n # TODO: set r in run-time by taking it from the new config\n if \"r\" in state:\n # set r from the state (for compatibility with older checkpoints)\n self.decoder.set_r(state[\"r\"])\n elif \"config\" in state:\n # set r from config used at training time (for inference)\n self.decoder.set_r(state[\"config\"][\"r\"])\n else:\n # set r from the new config (for new-models)\n self.decoder.set_r(config.r)\n if eval:\n self.eval()\n print(f\" > Model's reduction rate `r` is set to: {self.decoder.r}\")\n assert not self.training\n\n def get_criterion(self) -> nn.Module:\n \"\"\"Get the model criterion used in training.\"\"\"\n return TacotronLoss(self.config)\n\n @staticmethod\n def init_from_config(config: Coqpit):\n \"\"\"Initialize model from config.\"\"\"\n from TTS.utils.audio import AudioProcessor\n\n ap = AudioProcessor.init_from_config(config)\n tokenizer = TTSTokenizer.init_from_config(config)\n speaker_manager = SpeakerManager.init_from_config(config)\n return BaseTacotron(config, ap, tokenizer, speaker_manager)\n\n ##########################\n # TEST AND LOG FUNCTIONS #\n ##########################\n\n def test_run(self, assets: Dict) -> Tuple[Dict, Dict]:\n \"\"\"Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different behaviour.\n\n Args:\n assets (dict): A dict of training assets. 
For `tts` models, it must include `{'audio_processor': ap}`.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n \"\"\"\n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n aux_inputs = self._get_test_aux_input()\n for idx, sen in enumerate(test_sentences):\n outputs_dict = synthesis(\n self,\n sen,\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n use_griffin_lim=True,\n do_trim_silence=False,\n )\n test_audios[\"{}-audio\".format(idx)] = outputs_dict[\"wav\"]\n test_figures[\"{}-prediction\".format(idx)] = plot_spectrogram(\n outputs_dict[\"outputs\"][\"model_outputs\"], self.ap, output_fig=False\n )\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(\n outputs_dict[\"outputs\"][\"alignments\"], output_fig=False\n )\n return {\"figures\": test_figures, \"audios\": test_audios}\n\n def test_log(\n self, outputs: dict, logger: \"Logger\", assets: dict, steps: int # pylint: disable=unused-argument\n ) -> None:\n logger.test_audios(steps, outputs[\"audios\"], self.ap.sample_rate)\n logger.test_figures(steps, outputs[\"figures\"])\n\n #############################\n # COMMON COMPUTE FUNCTIONS\n #############################\n\n def compute_masks(self, text_lengths, mel_lengths):\n \"\"\"Compute masks against sequence paddings.\"\"\"\n # B x T_in_max (boolean)\n input_mask = sequence_mask(text_lengths)\n output_mask = None\n if mel_lengths is not None:\n max_len = mel_lengths.max()\n r = self.decoder.r\n max_len = max_len + (r - (max_len % r)) if max_len % r > 0 else max_len\n output_mask = sequence_mask(mel_lengths, max_len=max_len)\n return input_mask, output_mask\n\n def _backward_pass(self, mel_specs, encoder_outputs, mask):\n \"\"\"Run backwards decoder\"\"\"\n decoder_outputs_b, alignments_b, _ = self.decoder_backward(\n encoder_outputs, torch.flip(mel_specs, dims=(1,)), mask\n )\n decoder_outputs_b = decoder_outputs_b.transpose(1, 2).contiguous()\n return decoder_outputs_b, alignments_b\n\n def _coarse_decoder_pass(self, mel_specs, encoder_outputs, alignments, input_mask):\n \"\"\"Double Decoder Consistency\"\"\"\n T = mel_specs.shape[1]\n if T % self.coarse_decoder.r > 0:\n padding_size = self.coarse_decoder.r - (T % self.coarse_decoder.r)\n mel_specs = torch.nn.functional.pad(mel_specs, (0, 0, 0, padding_size, 0, 0))\n decoder_outputs_backward, alignments_backward, _ = self.coarse_decoder(\n encoder_outputs.detach(), mel_specs, input_mask\n )\n # scale_factor = self.decoder.r_init / self.decoder.r\n alignments_backward = torch.nn.functional.interpolate(\n alignments_backward.transpose(1, 2),\n size=alignments.shape[1],\n mode=\"nearest\",\n ).transpose(1, 2)\n decoder_outputs_backward = decoder_outputs_backward.transpose(1, 2)\n decoder_outputs_backward = decoder_outputs_backward[:, :T, :]\n return decoder_outputs_backward, alignments_backward\n\n #############################\n # EMBEDDING FUNCTIONS\n #############################\n\n def compute_gst(self, inputs, style_input, speaker_embedding=None):\n \"\"\"Compute global style token\"\"\"\n if isinstance(style_input, dict):\n # multiply each style token with a weight\n query = torch.zeros(1, 1, self.gst.gst_embedding_dim // 2).type_as(inputs)\n if speaker_embedding is not None:\n query = torch.cat([query, speaker_embedding.reshape(1, 1, -1)], dim=-1)\n\n _GST = 
torch.tanh(self.gst_layer.style_token_layer.style_tokens)\n gst_outputs = torch.zeros(1, 1, self.gst.gst_embedding_dim).type_as(inputs)\n for k_token, v_amplifier in style_input.items():\n key = _GST[int(k_token)].unsqueeze(0).expand(1, -1, -1)\n gst_outputs_att = self.gst_layer.style_token_layer.attention(query, key)\n gst_outputs = gst_outputs + gst_outputs_att * v_amplifier\n elif style_input is None:\n # ignore style token and return zero tensor\n gst_outputs = torch.zeros(1, 1, self.gst.gst_embedding_dim).type_as(inputs)\n else:\n # compute style tokens\n gst_outputs = self.gst_layer(style_input, speaker_embedding) # pylint: disable=not-callable\n inputs = self._concat_speaker_embedding(inputs, gst_outputs)\n return inputs\n\n def compute_capacitron_VAE_embedding(self, inputs, reference_mel_info, text_info=None, speaker_embedding=None):\n \"\"\"Capacitron Variational Autoencoder\"\"\"\n (\n VAE_outputs,\n posterior_distribution,\n prior_distribution,\n capacitron_beta,\n ) = self.capacitron_vae_layer(\n reference_mel_info,\n text_info,\n speaker_embedding, # pylint: disable=not-callable\n )\n\n VAE_outputs = VAE_outputs.to(inputs.device)\n encoder_output = self._concat_speaker_embedding(\n inputs, VAE_outputs\n ) # concatenate to the output of the basic tacotron encoder\n return (\n encoder_output,\n posterior_distribution,\n prior_distribution,\n capacitron_beta,\n )\n\n @staticmethod\n def _add_speaker_embedding(outputs, embedded_speakers):\n embedded_speakers_ = embedded_speakers.expand(outputs.size(0), outputs.size(1), -1)\n outputs = outputs + embedded_speakers_\n return outputs\n\n @staticmethod\n def _concat_speaker_embedding(outputs, embedded_speakers):\n embedded_speakers_ = embedded_speakers.expand(outputs.size(0), outputs.size(1), -1)\n outputs = torch.cat([outputs, embedded_speakers_], dim=-1)\n return outputs\n\n #############################\n # CALLBACKS\n #############################\n\n def on_epoch_start(self, trainer):\n \"\"\"Callback for setting values wrt gradual training schedule.\n\n Args:\n trainer (TrainerTTS): TTS trainer object that is used to train this model.\n \"\"\"\n if self.gradual_training:\n r, trainer.config.batch_size = gradual_training_scheduler(trainer.total_steps_done, trainer.config)\n trainer.config.r = r\n self.decoder.set_r(r)\n if trainer.config.bidirectional_decoder:\n trainer.model.decoder_backward.set_r(r)\n print(f\"\\n > Number of output frames: {self.decoder.r}\")" }, { "identifier": "alignment_diagonal_score", "path": "TTS/tts/utils/measures.py", "snippet": "def alignment_diagonal_score(alignments, binary=False):\n \"\"\"\n Compute how diagonal alignment predictions are. It is useful\n to measure the alignment consistency of a model\n Args:\n alignments (torch.Tensor): batch of alignments.\n binary (bool): if True, ignore scores and consider attention\n as a binary mask.\n Shape:\n - alignments : :math:`[B, T_de, T_en]`\n \"\"\"\n maxs = alignments.max(dim=1)[0]\n if binary:\n maxs[maxs > 0] = 1\n return maxs.mean(dim=1).mean(dim=0).item()" }, { "identifier": "SpeakerManager", "path": "TTS/tts/utils/speakers.py", "snippet": "class SpeakerManager(EmbeddingManager):\n \"\"\"Manage the speakers for multi-speaker 🐸TTS models. Load a datafile and parse the information\n in a way that can be queried by speaker or clip.\n\n There are 3 different scenarios considered:\n\n 1. Models using speaker embedding layers. The datafile only maps speaker names to ids used by the embedding layer.\n 2. Models using d-vectors. 
The datafile includes a dictionary in the following format.\n\n ::\n\n {\n 'clip_name.wav':{\n 'name': 'speakerA',\n 'embedding'[<d_vector_values>]\n },\n ...\n }\n\n\n 3. Computing the d-vectors by the speaker encoder. It loads the speaker encoder model and\n computes the d-vectors for a given clip or speaker.\n\n Args:\n d_vectors_file_path (str, optional): Path to the metafile including x vectors. Defaults to \"\".\n speaker_id_file_path (str, optional): Path to the metafile that maps speaker names to ids used by\n TTS models. Defaults to \"\".\n encoder_model_path (str, optional): Path to the speaker encoder model file. Defaults to \"\".\n encoder_config_path (str, optional): Path to the spealer encoder config file. Defaults to \"\".\n\n Examples:\n >>> # load audio processor and speaker encoder\n >>> ap = AudioProcessor(**config.audio)\n >>> manager = SpeakerManager(encoder_model_path=encoder_model_path, encoder_config_path=encoder_config_path)\n >>> # load a sample audio and compute embedding\n >>> waveform = ap.load_wav(sample_wav_path)\n >>> mel = ap.melspectrogram(waveform)\n >>> d_vector = manager.compute_embeddings(mel.T)\n \"\"\"\n\n def __init__(\n self,\n data_items: List[List[Any]] = None,\n d_vectors_file_path: str = \"\",\n speaker_id_file_path: str = \"\",\n encoder_model_path: str = \"\",\n encoder_config_path: str = \"\",\n use_cuda: bool = False,\n ):\n super().__init__(\n embedding_file_path=d_vectors_file_path,\n id_file_path=speaker_id_file_path,\n encoder_model_path=encoder_model_path,\n encoder_config_path=encoder_config_path,\n use_cuda=use_cuda,\n )\n\n if data_items:\n self.set_ids_from_data(data_items, parse_key=\"speaker_name\")\n\n @property\n def num_speakers(self):\n return len(self.name_to_id)\n\n @property\n def speaker_names(self):\n return list(self.name_to_id.keys())\n\n def get_speakers(self) -> List:\n return self.name_to_id\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", samples: Union[List[List], List[Dict]] = None) -> \"SpeakerManager\":\n \"\"\"Initialize a speaker manager from config\n\n Args:\n config (Coqpit): Config object.\n samples (Union[List[List], List[Dict]], optional): List of data samples to parse out the speaker names.\n Defaults to None.\n\n Returns:\n SpeakerEncoder: Speaker encoder object.\n \"\"\"\n speaker_manager = None\n if get_from_config_or_model_args_with_default(config, \"use_speaker_embedding\", False):\n if samples:\n speaker_manager = SpeakerManager(data_items=samples)\n if get_from_config_or_model_args_with_default(config, \"speaker_file\", None):\n speaker_manager = SpeakerManager(\n speaker_id_file_path=get_from_config_or_model_args_with_default(config, \"speaker_file\", None)\n )\n if get_from_config_or_model_args_with_default(config, \"speakers_file\", None):\n speaker_manager = SpeakerManager(\n speaker_id_file_path=get_from_config_or_model_args_with_default(config, \"speakers_file\", None)\n )\n\n if get_from_config_or_model_args_with_default(config, \"use_d_vector_file\", False):\n speaker_manager = SpeakerManager()\n if get_from_config_or_model_args_with_default(config, \"d_vector_file\", None):\n speaker_manager = SpeakerManager(\n d_vectors_file_path=get_from_config_or_model_args_with_default(config, \"d_vector_file\", None)\n )\n return speaker_manager" }, { "identifier": "TTSTokenizer", "path": "TTS/tts/utils/text/tokenizer.py", "snippet": "class TTSTokenizer:\n \"\"\"🐸TTS tokenizer to convert input characters to token IDs and back.\n\n Token IDs for OOV chars are discarded but those are 
stored in `self.not_found_characters` for later.\n\n Args:\n use_phonemes (bool):\n Whether to use phonemes instead of characters. Defaults to False.\n\n characters (Characters):\n A Characters object to use for character-to-ID and ID-to-character mappings.\n\n text_cleaner (callable):\n A function to pre-process the text before tokenization and phonemization. Defaults to None.\n\n phonemizer (Phonemizer):\n A phonemizer object or a dict that maps language codes to phonemizer objects. Defaults to None.\n\n Example:\n\n >>> from TTS.tts.utils.text.tokenizer import TTSTokenizer\n >>> tokenizer = TTSTokenizer(use_phonemes=False, characters=Graphemes())\n >>> text = \"Hello world!\"\n >>> ids = tokenizer.text_to_ids(text)\n >>> text_hat = tokenizer.ids_to_text(ids)\n >>> assert text == text_hat\n \"\"\"\n\n def __init__(\n self,\n use_phonemes=False,\n text_cleaner: Callable = None,\n characters: \"BaseCharacters\" = None,\n phonemizer: Union[\"Phonemizer\", Dict] = None,\n add_blank: bool = False,\n use_eos_bos=False,\n ):\n self.text_cleaner = text_cleaner\n self.use_phonemes = use_phonemes\n self.add_blank = add_blank\n self.use_eos_bos = use_eos_bos\n self.characters = characters\n self.not_found_characters = []\n self.phonemizer = phonemizer\n\n @property\n def characters(self):\n return self._characters\n\n @characters.setter\n def characters(self, new_characters):\n self._characters = new_characters\n self.pad_id = self.characters.char_to_id(self.characters.pad) if self.characters.pad else None\n self.blank_id = self.characters.char_to_id(self.characters.blank) if self.characters.blank else None\n\n def encode(self, text: str) -> List[int]:\n \"\"\"Encodes a string of text as a sequence of IDs.\"\"\"\n token_ids = []\n for char in text:\n try:\n idx = self.characters.char_to_id(char)\n token_ids.append(idx)\n except KeyError:\n # discard but store not found characters\n if char not in self.not_found_characters:\n self.not_found_characters.append(char)\n print(text)\n print(f\" [!] Character {repr(char)} not found in the vocabulary. Discarding it.\")\n return token_ids\n\n def decode(self, token_ids: List[int]) -> str:\n \"\"\"Decodes a sequence of IDs to a string of text.\"\"\"\n text = \"\"\n for token_id in token_ids:\n text += self.characters.id_to_char(token_id)\n return text\n\n def text_to_ids(self, text: str, language: str = None) -> List[int]: # pylint: disable=unused-argument\n \"\"\"Converts a string of text to a sequence of token IDs.\n\n Args:\n text(str):\n The text to convert to token IDs.\n\n language(str):\n The language code of the text. Defaults to None.\n\n TODO:\n - Add support for language-specific processing.\n\n 1. Text normalizatin\n 2. Phonemization (if use_phonemes is True)\n 3. Add blank char between characters\n 4. Add BOS and EOS characters\n 5. 
Text to token IDs\n \"\"\"\n # TODO: text cleaner should pick the right routine based on the language\n if self.text_cleaner is not None:\n text = self.text_cleaner(text)\n if self.use_phonemes:\n text = self.phonemizer.phonemize(text, separator=\"\", language=language)\n text = self.encode(text)\n if self.add_blank:\n text = self.intersperse_blank_char(text, True)\n if self.use_eos_bos:\n text = self.pad_with_bos_eos(text)\n return text\n\n def ids_to_text(self, id_sequence: List[int]) -> str:\n \"\"\"Converts a sequence of token IDs to a string of text.\"\"\"\n return self.decode(id_sequence)\n\n def pad_with_bos_eos(self, char_sequence: List[str]):\n \"\"\"Pads a sequence with the special BOS and EOS characters.\"\"\"\n return [self.characters.bos_id] + list(char_sequence) + [self.characters.eos_id]\n\n def intersperse_blank_char(self, char_sequence: List[str], use_blank_char: bool = False):\n \"\"\"Intersperses the blank character between characters in a sequence.\n\n Use the ```blank``` character if defined else use the ```pad``` character.\n \"\"\"\n char_to_use = self.characters.blank_id if use_blank_char else self.characters.pad\n result = [char_to_use] * (len(char_sequence) * 2 + 1)\n result[1::2] = char_sequence\n return result\n\n def print_logs(self, level: int = 0):\n indent = \"\\t\" * level\n print(f\"{indent}| > add_blank: {self.add_blank}\")\n print(f\"{indent}| > use_eos_bos: {self.use_eos_bos}\")\n print(f\"{indent}| > use_phonemes: {self.use_phonemes}\")\n if self.use_phonemes:\n print(f\"{indent}| > phonemizer:\")\n self.phonemizer.print_logs(level + 1)\n if len(self.not_found_characters) > 0:\n print(f\"{indent}| > {len(self.not_found_characters)} not found characters:\")\n for char in self.not_found_characters:\n print(f\"{indent}| > {char}\")\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", characters: \"BaseCharacters\" = None):\n \"\"\"Init Tokenizer object from config\n\n Args:\n config (Coqpit): Coqpit model config.\n characters (BaseCharacters): Defines the model character set. If not set, use the default options based on\n the config values. 
Defaults to None.\n \"\"\"\n # init cleaners\n text_cleaner = None\n if isinstance(config.text_cleaner, (str, list)):\n text_cleaner = getattr(cleaners, config.text_cleaner)\n\n # init characters\n if characters is None:\n # set characters based on defined characters class\n if config.characters and config.characters.characters_class:\n CharactersClass = import_class(config.characters.characters_class)\n characters, new_config = CharactersClass.init_from_config(config)\n # set characters based on config\n else:\n if config.use_phonemes:\n # init phoneme set\n characters, new_config = IPAPhonemes().init_from_config(config)\n else:\n # init character set\n characters, new_config = Graphemes().init_from_config(config)\n\n else:\n characters, new_config = characters.init_from_config(config)\n\n # set characters class\n new_config.characters.characters_class = get_import_path(characters)\n\n # init phonemizer\n phonemizer = None\n if config.use_phonemes:\n if \"phonemizer\" in config and config.phonemizer == \"multi_phonemizer\":\n lang_to_phonemizer_name = {}\n for dataset in config.datasets:\n if dataset.language != \"\":\n lang_to_phonemizer_name[dataset.language] = dataset.phonemizer\n else:\n raise ValueError(\"Multi phonemizer requires language to be set for each dataset.\")\n phonemizer = MultiPhonemizer(lang_to_phonemizer_name)\n else:\n phonemizer_kwargs = {\"language\": config.phoneme_language}\n if \"phonemizer\" in config and config.phonemizer:\n phonemizer = get_phonemizer_by_name(config.phonemizer, **phonemizer_kwargs)\n else:\n try:\n phonemizer = get_phonemizer_by_name(\n DEF_LANG_TO_PHONEMIZER[config.phoneme_language], **phonemizer_kwargs\n )\n new_config.phonemizer = phonemizer.name()\n except KeyError as e:\n raise ValueError(\n f\"\"\"No phonemizer found for language {config.phoneme_language}.\n You may need to install a third party library for this language.\"\"\"\n ) from e\n\n return (\n TTSTokenizer(\n config.use_phonemes, text_cleaner, characters, phonemizer, config.add_blank, config.enable_eos_bos_chars\n ),\n new_config,\n )" }, { "identifier": "plot_alignment", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_alignment(alignment, info=None, fig_size=(16, 10), title=None, output_fig=False, plot_log=False):\n if isinstance(alignment, torch.Tensor):\n alignment_ = alignment.detach().cpu().numpy().squeeze()\n else:\n alignment_ = alignment\n alignment_ = alignment_.astype(np.float32) if alignment_.dtype == np.float16 else alignment_\n fig, ax = plt.subplots(figsize=fig_size)\n im = ax.imshow(\n alignment_.T, aspect=\"auto\", origin=\"lower\", interpolation=\"none\", norm=LogNorm() if plot_log else None\n )\n fig.colorbar(im, ax=ax)\n xlabel = \"Decoder timestep\"\n if info is not None:\n xlabel += \"\\n\\n\" + info\n plt.xlabel(xlabel)\n plt.ylabel(\"Encoder timestep\")\n # plt.yticks(range(len(text)), list(text))\n plt.tight_layout()\n if title is not None:\n plt.title(title)\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "plot_spectrogram", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):\n if isinstance(spectrogram, torch.Tensor):\n spectrogram_ = spectrogram.detach().cpu().numpy().squeeze().T\n else:\n spectrogram_ = spectrogram.T\n spectrogram_ = spectrogram_.astype(np.float32) if spectrogram_.dtype == np.float16 else spectrogram_\n if ap is not None:\n spectrogram_ = ap.denormalize(spectrogram_) # pylint: disable=protected-access\n fig = 
plt.figure(figsize=fig_size)\n plt.imshow(spectrogram_, aspect=\"auto\", origin=\"lower\")\n plt.colorbar()\n plt.tight_layout()\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "CapacitronOptimizer", "path": "TTS/utils/capacitron_optimizer.py", "snippet": "class CapacitronOptimizer:\n \"\"\"Double optimizer class for the Capacitron model.\"\"\"\n\n def __init__(self, config: dict, model_params: Generator) -> None:\n self.primary_params, self.secondary_params = self.split_model_parameters(model_params)\n\n optimizer_names = list(config.optimizer_params.keys())\n optimizer_parameters = list(config.optimizer_params.values())\n\n self.primary_optimizer = get_optimizer(\n optimizer_names[0],\n optimizer_parameters[0],\n config.lr,\n parameters=self.primary_params,\n )\n\n self.secondary_optimizer = get_optimizer(\n optimizer_names[1],\n self.extract_optimizer_parameters(optimizer_parameters[1]),\n optimizer_parameters[1][\"lr\"],\n parameters=self.secondary_params,\n )\n\n self.param_groups = self.primary_optimizer.param_groups\n\n def first_step(self):\n self.secondary_optimizer.step()\n self.secondary_optimizer.zero_grad()\n self.primary_optimizer.zero_grad()\n\n def step(self):\n # Update param groups to display the correct learning rate\n self.param_groups = self.primary_optimizer.param_groups\n self.primary_optimizer.step()\n\n def zero_grad(self, set_to_none=False):\n self.primary_optimizer.zero_grad(set_to_none)\n self.secondary_optimizer.zero_grad(set_to_none)\n\n def load_state_dict(self, state_dict):\n self.primary_optimizer.load_state_dict(state_dict[0])\n self.secondary_optimizer.load_state_dict(state_dict[1])\n\n def state_dict(self):\n return [self.primary_optimizer.state_dict(), self.secondary_optimizer.state_dict()]\n\n @staticmethod\n def split_model_parameters(model_params: Generator) -> list:\n primary_params = []\n secondary_params = []\n for name, param in model_params:\n if param.requires_grad:\n if name == \"capacitron_vae_layer.beta\":\n secondary_params.append(param)\n else:\n primary_params.append(param)\n return [iter(primary_params), iter(secondary_params)]\n\n @staticmethod\n def extract_optimizer_parameters(params: dict) -> dict:\n \"\"\"Extract parameters that are not the learning rate\"\"\"\n return {k: v for k, v in params.items() if k != \"lr\"}" } ]
from typing import Dict, List, Tuple, Union
from torch import nn
from torch.cuda.amp.autocast_mode import autocast
from trainer.trainer_utils import get_optimizer, get_scheduler
from TTS.tts.layers.tacotron.capacitron_layers import CapacitronVAE
from TTS.tts.layers.tacotron.gst_layers import GST
from TTS.tts.layers.tacotron.tacotron import Decoder, Encoder, PostCBHG
from TTS.tts.models.base_tacotron import BaseTacotron
from TTS.tts.utils.measures import alignment_diagonal_score
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
from TTS.utils.capacitron_optimizer import CapacitronOptimizer
from TTS.utils.audio import AudioProcessor
import torch
14140
""" text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] linear_input = batch["linear_input"] stop_targets = batch["stop_targets"] stop_target_lengths = batch["stop_target_lengths"] speaker_ids = batch["speaker_ids"] d_vectors = batch["d_vectors"] aux_input = {"speaker_ids": speaker_ids, "d_vectors": d_vectors} outputs = self.forward(text_input, text_lengths, mel_input, mel_lengths, aux_input) # set the [alignment] lengths wrt reduction factor for guided attention if mel_lengths.max() % self.decoder.r != 0: alignment_lengths = ( mel_lengths + (self.decoder.r - (mel_lengths.max() % self.decoder.r)) ) // self.decoder.r else: alignment_lengths = mel_lengths // self.decoder.r # compute loss with autocast(enabled=False): # use float32 for the criterion loss_dict = criterion( outputs["model_outputs"].float(), outputs["decoder_outputs"].float(), mel_input.float(), linear_input.float(), outputs["stop_tokens"].float(), stop_targets.float(), stop_target_lengths, outputs["capacitron_vae_outputs"] if self.capacitron_vae else None, mel_lengths, None if outputs["decoder_outputs_backward"] is None else outputs["decoder_outputs_backward"].float(), outputs["alignments"].float(), alignment_lengths, None if outputs["alignments_backward"] is None else outputs["alignments_backward"].float(), text_lengths, ) # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(outputs["alignments"]) loss_dict["align_error"] = align_error return outputs, loss_dict def get_optimizer(self) -> List: if self.use_capacitron_vae: return CapacitronOptimizer(self.config, self.named_parameters()) return get_optimizer(self.config.optimizer, self.config.optimizer_params, self.config.lr, self) def get_scheduler(self, optimizer: object): opt = optimizer.primary_optimizer if self.use_capacitron_vae else optimizer return get_scheduler(self.config.lr_scheduler, self.config.lr_scheduler_params, opt) def before_gradient_clipping(self): if self.use_capacitron_vae: # Capacitron model specific gradient clipping model_params_to_clip = [] for name, param in self.named_parameters(): if param.requires_grad: if name != "capacitron_vae_layer.beta": model_params_to_clip.append(param) torch.nn.utils.clip_grad_norm_(model_params_to_clip, self.capacitron_vae.capacitron_grad_clip) def _create_logs(self, batch, outputs, ap): postnet_outputs = outputs["model_outputs"] decoder_outputs = outputs["decoder_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] mel_input = batch["mel_input"] linear_input = batch["linear_input"] pred_linear_spec = postnet_outputs[0].data.cpu().numpy() pred_mel_spec = decoder_outputs[0].data.cpu().numpy() gt_linear_spec = linear_input[0].data.cpu().numpy() gt_mel_spec = mel_input[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { "pred_linear_spec": plot_spectrogram(pred_linear_spec, ap, output_fig=False), "real_linear_spec": plot_spectrogram(gt_linear_spec, ap, output_fig=False), "pred_mel_spec": plot_spectrogram(pred_mel_spec, ap, output_fig=False), "real_mel_spec": plot_spectrogram(gt_mel_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False), } if self.bidirectional_decoder or self.double_decoder_consistency: figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) # Sample audio audio = ap.inv_spectrogram(pred_linear_spec.T) return figures, {"audio": audio} 
def train_log( self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int ) -> None: # pylint: disable=no-self-use figures, audios = self._create_logs(batch, outputs, self.ap) logger.train_figures(steps, figures) logger.train_audios(steps, audios, self.ap.sample_rate) def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: figures, audios = self._create_logs(batch, outputs, self.ap) logger.eval_figures(steps, figures) logger.eval_audios(steps, audios, self.ap.sample_rate) @staticmethod def init_from_config(config: "TacotronConfig", samples: Union[List[List], List[Dict]] = None): """Initiate model from config Args: config (TacotronConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. """ ap = AudioProcessor.init_from_config(config)
# coding: utf-8 class Tacotron(BaseTacotron): """Tacotron as in https://arxiv.org/abs/1703.10135 It's an autoregressive encoder-attention-decoder-postnet architecture. Check `TacotronConfig` for the arguments. Args: config (TacotronConfig): Configuration for the Tacotron model. speaker_manager (SpeakerManager): Speaker manager to handle multi-speaker settings. Only use if the model is a multi-speaker model. Defaults to None. """ def __init__( self, config: "TacotronConfig", ap: "AudioProcessor" = None, tokenizer: "TTSTokenizer" = None, speaker_manager: SpeakerManager = None, ): super().__init__(config, ap, tokenizer, speaker_manager) # pass all config fields to `self` # for fewer code change for key in config: setattr(self, key, config[key]) # set speaker embedding channel size for determining `in_channels` for the connected layers. # `init_multispeaker` needs to be called once more in training to initialize the speaker embedding layer based # on the number of speakers infered from the dataset. if self.use_speaker_embedding or self.use_d_vector_file: self.init_multispeaker(config) self.decoder_in_features += self.embedded_speaker_dim # add speaker embedding dim if self.use_gst: self.decoder_in_features += self.gst.gst_embedding_dim if self.use_capacitron_vae: self.decoder_in_features += self.capacitron_vae.capacitron_VAE_embedding_dim # embedding layer self.embedding = nn.Embedding(self.num_chars, 256, padding_idx=0) self.embedding.weight.data.normal_(0, 0.3) # base model layers self.encoder = Encoder(self.encoder_in_features) self.decoder = Decoder( self.decoder_in_features, self.decoder_output_dim, self.r, self.memory_size, self.attention_type, self.windowing, self.attention_norm, self.prenet_type, self.prenet_dropout, self.use_forward_attn, self.transition_agent, self.forward_attn_mask, self.location_attn, self.attention_heads, self.separate_stopnet, self.max_decoder_steps, ) self.postnet = PostCBHG(self.decoder_output_dim) self.last_linear = nn.Linear(self.postnet.cbhg.gru_features * 2, self.out_channels) # setup prenet dropout self.decoder.prenet.dropout_at_inference = self.prenet_dropout_at_inference # global style token layers if self.gst and self.use_gst: self.gst_layer = GST( num_mel=self.decoder_output_dim, num_heads=self.gst.gst_num_heads, num_style_tokens=self.gst.gst_num_style_tokens, gst_embedding_dim=self.gst.gst_embedding_dim, ) # Capacitron layers if self.capacitron_vae and self.use_capacitron_vae: self.capacitron_vae_layer = CapacitronVAE( num_mel=self.decoder_output_dim, encoder_output_dim=self.encoder_in_features, capacitron_VAE_embedding_dim=self.capacitron_vae.capacitron_VAE_embedding_dim, speaker_embedding_dim=self.embedded_speaker_dim if self.use_speaker_embedding and self.capacitron_vae.capacitron_use_speaker_embedding else None, text_summary_embedding_dim=self.capacitron_vae.capacitron_text_summary_embedding_dim if self.capacitron_vae.capacitron_use_text_summary_embeddings else None, ) # backward pass decoder if self.bidirectional_decoder: self._init_backward_decoder() # setup DDC if self.double_decoder_consistency: self.coarse_decoder = Decoder( self.decoder_in_features, self.decoder_output_dim, self.ddc_r, self.memory_size, self.attention_type, self.windowing, self.attention_norm, self.prenet_type, self.prenet_dropout, self.use_forward_attn, self.transition_agent, self.forward_attn_mask, self.location_attn, self.attention_heads, self.separate_stopnet, self.max_decoder_steps, ) def forward( # pylint: disable=dangerous-default-value self, text, text_lengths, 
mel_specs=None, mel_lengths=None, aux_input={"speaker_ids": None, "d_vectors": None} ): """ Shapes: text: [B, T_in] text_lengths: [B] mel_specs: [B, T_out, C] mel_lengths: [B] aux_input: 'speaker_ids': [B, 1] and 'd_vectors':[B, C] """ aux_input = self._format_aux_input(aux_input) outputs = {"alignments_backward": None, "decoder_outputs_backward": None} inputs = self.embedding(text) input_mask, output_mask = self.compute_masks(text_lengths, mel_lengths) # B x T_in x encoder_in_features encoder_outputs = self.encoder(inputs) # sequence masking encoder_outputs = encoder_outputs * input_mask.unsqueeze(2).expand_as(encoder_outputs) # global style token if self.gst and self.use_gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, mel_specs) # speaker embedding if self.use_speaker_embedding or self.use_d_vector_file: if not self.use_d_vector_file: # B x 1 x speaker_embed_dim embedded_speakers = self.speaker_embedding(aux_input["speaker_ids"])[:, None] else: # B x 1 x speaker_embed_dim embedded_speakers = torch.unsqueeze(aux_input["d_vectors"], 1) encoder_outputs = self._concat_speaker_embedding(encoder_outputs, embedded_speakers) # Capacitron if self.capacitron_vae and self.use_capacitron_vae: # B x capacitron_VAE_embedding_dim encoder_outputs, *capacitron_vae_outputs = self.compute_capacitron_VAE_embedding( encoder_outputs, reference_mel_info=[mel_specs, mel_lengths], text_info=[inputs, text_lengths] if self.capacitron_vae.capacitron_use_text_summary_embeddings else None, speaker_embedding=embedded_speakers if self.capacitron_vae.capacitron_use_speaker_embedding else None, ) else: capacitron_vae_outputs = None # decoder_outputs: B x decoder_in_features x T_out # alignments: B x T_in x encoder_in_features # stop_tokens: B x T_in decoder_outputs, alignments, stop_tokens = self.decoder(encoder_outputs, mel_specs, input_mask) # sequence masking if output_mask is not None: decoder_outputs = decoder_outputs * output_mask.unsqueeze(1).expand_as(decoder_outputs) # B x T_out x decoder_in_features postnet_outputs = self.postnet(decoder_outputs) # sequence masking if output_mask is not None: postnet_outputs = postnet_outputs * output_mask.unsqueeze(2).expand_as(postnet_outputs) # B x T_out x posnet_dim postnet_outputs = self.last_linear(postnet_outputs) # B x T_out x decoder_in_features decoder_outputs = decoder_outputs.transpose(1, 2).contiguous() if self.bidirectional_decoder: decoder_outputs_backward, alignments_backward = self._backward_pass(mel_specs, encoder_outputs, input_mask) outputs["alignments_backward"] = alignments_backward outputs["decoder_outputs_backward"] = decoder_outputs_backward if self.double_decoder_consistency: decoder_outputs_backward, alignments_backward = self._coarse_decoder_pass( mel_specs, encoder_outputs, alignments, input_mask ) outputs["alignments_backward"] = alignments_backward outputs["decoder_outputs_backward"] = decoder_outputs_backward outputs.update( { "model_outputs": postnet_outputs, "decoder_outputs": decoder_outputs, "alignments": alignments, "stop_tokens": stop_tokens, "capacitron_vae_outputs": capacitron_vae_outputs, } ) return outputs @torch.no_grad() def inference(self, text_input, aux_input=None): aux_input = self._format_aux_input(aux_input) inputs = self.embedding(text_input) encoder_outputs = self.encoder(inputs) if self.gst and self.use_gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, aux_input["style_mel"], aux_input["d_vectors"]) if self.capacitron_vae and self.use_capacitron_vae: if aux_input["style_text"] is 
not None: style_text_embedding = self.embedding(aux_input["style_text"]) style_text_length = torch.tensor([style_text_embedding.size(1)], dtype=torch.int64).to( encoder_outputs.device ) # pylint: disable=not-callable reference_mel_length = ( torch.tensor([aux_input["style_mel"].size(1)], dtype=torch.int64).to(encoder_outputs.device) if aux_input["style_mel"] is not None else None ) # pylint: disable=not-callable # B x capacitron_VAE_embedding_dim encoder_outputs, *_ = self.compute_capacitron_VAE_embedding( encoder_outputs, reference_mel_info=[aux_input["style_mel"], reference_mel_length] if aux_input["style_mel"] is not None else None, text_info=[style_text_embedding, style_text_length] if aux_input["style_text"] is not None else None, speaker_embedding=aux_input["d_vectors"] if self.capacitron_vae.capacitron_use_speaker_embedding else None, ) if self.num_speakers > 1: if not self.use_d_vector_file: # B x 1 x speaker_embed_dim embedded_speakers = self.speaker_embedding(aux_input["speaker_ids"]) # reshape embedded_speakers if embedded_speakers.ndim == 1: embedded_speakers = embedded_speakers[None, None, :] elif embedded_speakers.ndim == 2: embedded_speakers = embedded_speakers[None, :] else: # B x 1 x speaker_embed_dim embedded_speakers = torch.unsqueeze(aux_input["d_vectors"], 1) encoder_outputs = self._concat_speaker_embedding(encoder_outputs, embedded_speakers) decoder_outputs, alignments, stop_tokens = self.decoder.inference(encoder_outputs) postnet_outputs = self.postnet(decoder_outputs) postnet_outputs = self.last_linear(postnet_outputs) decoder_outputs = decoder_outputs.transpose(1, 2) outputs = { "model_outputs": postnet_outputs, "decoder_outputs": decoder_outputs, "alignments": alignments, "stop_tokens": stop_tokens, } return outputs def before_backward_pass(self, loss_dict, optimizer) -> None: # Extracting custom training specific operations for capacitron # from the trainer if self.use_capacitron_vae: loss_dict["capacitron_vae_beta_loss"].backward() optimizer.first_step() def train_step(self, batch: Dict, criterion: torch.nn.Module) -> Tuple[Dict, Dict]: """Perform a single training step by fetching the right set of samples from the batch. Args: batch ([Dict]): A dictionary of input tensors. criterion ([torch.nn.Module]): Callable criterion to compute model loss. 
""" text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] linear_input = batch["linear_input"] stop_targets = batch["stop_targets"] stop_target_lengths = batch["stop_target_lengths"] speaker_ids = batch["speaker_ids"] d_vectors = batch["d_vectors"] aux_input = {"speaker_ids": speaker_ids, "d_vectors": d_vectors} outputs = self.forward(text_input, text_lengths, mel_input, mel_lengths, aux_input) # set the [alignment] lengths wrt reduction factor for guided attention if mel_lengths.max() % self.decoder.r != 0: alignment_lengths = ( mel_lengths + (self.decoder.r - (mel_lengths.max() % self.decoder.r)) ) // self.decoder.r else: alignment_lengths = mel_lengths // self.decoder.r # compute loss with autocast(enabled=False): # use float32 for the criterion loss_dict = criterion( outputs["model_outputs"].float(), outputs["decoder_outputs"].float(), mel_input.float(), linear_input.float(), outputs["stop_tokens"].float(), stop_targets.float(), stop_target_lengths, outputs["capacitron_vae_outputs"] if self.capacitron_vae else None, mel_lengths, None if outputs["decoder_outputs_backward"] is None else outputs["decoder_outputs_backward"].float(), outputs["alignments"].float(), alignment_lengths, None if outputs["alignments_backward"] is None else outputs["alignments_backward"].float(), text_lengths, ) # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(outputs["alignments"]) loss_dict["align_error"] = align_error return outputs, loss_dict def get_optimizer(self) -> List: if self.use_capacitron_vae: return CapacitronOptimizer(self.config, self.named_parameters()) return get_optimizer(self.config.optimizer, self.config.optimizer_params, self.config.lr, self) def get_scheduler(self, optimizer: object): opt = optimizer.primary_optimizer if self.use_capacitron_vae else optimizer return get_scheduler(self.config.lr_scheduler, self.config.lr_scheduler_params, opt) def before_gradient_clipping(self): if self.use_capacitron_vae: # Capacitron model specific gradient clipping model_params_to_clip = [] for name, param in self.named_parameters(): if param.requires_grad: if name != "capacitron_vae_layer.beta": model_params_to_clip.append(param) torch.nn.utils.clip_grad_norm_(model_params_to_clip, self.capacitron_vae.capacitron_grad_clip) def _create_logs(self, batch, outputs, ap): postnet_outputs = outputs["model_outputs"] decoder_outputs = outputs["decoder_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] mel_input = batch["mel_input"] linear_input = batch["linear_input"] pred_linear_spec = postnet_outputs[0].data.cpu().numpy() pred_mel_spec = decoder_outputs[0].data.cpu().numpy() gt_linear_spec = linear_input[0].data.cpu().numpy() gt_mel_spec = mel_input[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { "pred_linear_spec": plot_spectrogram(pred_linear_spec, ap, output_fig=False), "real_linear_spec": plot_spectrogram(gt_linear_spec, ap, output_fig=False), "pred_mel_spec": plot_spectrogram(pred_mel_spec, ap, output_fig=False), "real_mel_spec": plot_spectrogram(gt_mel_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False), } if self.bidirectional_decoder or self.double_decoder_consistency: figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) # Sample audio audio = ap.inv_spectrogram(pred_linear_spec.T) return figures, {"audio": audio} 
def train_log( self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int ) -> None: # pylint: disable=no-self-use figures, audios = self._create_logs(batch, outputs, self.ap) logger.train_figures(steps, figures) logger.train_audios(steps, audios, self.ap.sample_rate) def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: figures, audios = self._create_logs(batch, outputs, self.ap) logger.eval_figures(steps, figures) logger.eval_audios(steps, audios, self.ap.sample_rate) @staticmethod def init_from_config(config: "TacotronConfig", samples: Union[List[List], List[Dict]] = None): """Initiate model from config Args: config (TacotronConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. """ ap = AudioProcessor.init_from_config(config)
tokenizer, new_config = TTSTokenizer.init_from_config(config)
8
2023-11-29 08:15:06+00:00
16k
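The record above pairs retrieved cross-file context snippets with a truncated target file and a single gold next line. As a rough illustration only — the JSONL file name, the one-object-per-line layout, and the prompt format are assumptions, and build_prompt / exact_match are hypothetical helpers rather than part of the dataset's tooling — a consumer of such a record might assemble a completion prompt from the context, import_statement, and cropped_code fields and compare a model's first generated line against next_line:

import json

def build_prompt(record: dict, max_context_snippets: int = 3) -> str:
    """Assemble a next-line completion prompt from one record.

    Uses only fields visible in the dump: cross-file `context` snippets
    (each with "identifier", "path", "snippet"), the target file's
    `import_statement`, and its `cropped_code` prefix.
    """
    parts = []
    # Cross-file snippets retrieved for this example; `gold_snippet_index`
    # presumably marks the snippet the gold next line actually depends on.
    for snippet in record["context"][:max_context_snippets]:
        parts.append(f'# {snippet["path"]}\n{snippet["snippet"]}')
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n\n".join(parts)

def exact_match(prediction: str, record: dict) -> bool:
    """Score the first predicted line against the gold `next_line`."""
    stripped = prediction.strip()
    pred_line = stripped.splitlines()[0].strip() if stripped else ""
    return pred_line == record["next_line"].strip()

if __name__ == "__main__":
    # Hypothetical file name; one JSON object per line is assumed.
    with open("completion_eval.jsonl", "r", encoding="utf-8") as f:
        record = json.loads(f.readline())
    prompt = build_prompt(record)
    print(f'{record["repo_name"]}:{record["file_path"]} expects: {record["next_line"]}')
    # `fake_completion` stands in for a model call fed with `prompt`.
    fake_completion = record["next_line"] + "\n"
    print("exact match:", exact_match(fake_completion, record))

Exact string match is the simplest scoring choice here; a real harness might instead normalize whitespace or compare token IDs.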
wenquanlu/HandRefiner
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * (1. 
- mask) + mask * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
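For reference, the DDIMSampler.p_sample_ddim snippet in the context above applies classifier-free guidance by running the model once on a doubled batch of unconditional and conditional inputs and then extrapolating between the two predictions. Below is a minimal sketch of that guidance step; the function name, the generic eps-prediction callable `model`, and `guidance_scale` are illustrative assumptions, not identifiers taken from the record.

import torch

def classifier_free_guidance_eps(model, x, t, cond, uncond, guidance_scale):
    # Mirrors the guidance step in the p_sample_ddim snippet: duplicate the batch,
    # run unconditional and conditional predictions together, then extrapolate.
    x_in = torch.cat([x] * 2)
    t_in = torch.cat([t] * 2)
    c_in = torch.cat([uncond, cond])  # unconditional first, conditional second
    e_uncond, e_cond = model(x_in, t_in, c_in).chunk(2)
    return e_uncond + guidance_scale * (e_cond - e_uncond)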
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
12479
assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
2
2023-11-24 10:19:23+00:00
16k
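To make the record above easier to read: its cropped_code ends at the q_sample signature, and the gold next_line field supplies the method's first statement. Below is a minimal sketch of the completed method, assuming the standard DDPM forward-noising formula x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps (the same formula used by DDIMSampler.stochastic_encode in the context snippets); the closing return line is an assumed completion, not text taken from the record.

# Sketch of the completed method as it would sit inside the DDPM class of this record;
# `default` and `extract_into_tensor` come from the record's own import_statement.
def q_sample(self, x_start, t, noise=None):
    noise = default(noise, lambda: torch.randn_like(x_start))  # gold next_line of this record
    # Assumed completion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)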
eth-sri/language-model-arithmetic
src/model_arithmetic/model_arithmetic.py
[ { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. \n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model" }, { "identifier": "load_tokenizer", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that 
can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer" }, { "identifier": "get_max_length", "path": "src/model_arithmetic/utils.py", "snippet": "def get_max_length(model_config, default_length=1024):\n \"\"\"\n Get the maximum length from the model configuration.\n\n Args:\n model_config (object): The model configuration object.\n default_length (int, optional): The default maximum length. Defaults to 1024.\n\n Returns:\n int: The maximum length.\n \"\"\"\n max_length = None\n for length_setting in [\"n_positions\", \"max_position_embeddings\", \"seq_length\"]:\n max_length = getattr(model_config, length_setting, None)\n if max_length:\n if ENABLE_LOGGING:\n logger.debug(f\"Found max length: {max_length}\")\n break\n if not max_length:\n max_length = default_length\n if ENABLE_LOGGING:\n logger.debug(f\"Using default max length: {max_length}\")\n\n return max_length" }, { "identifier": "ENABLE_LOGGING", "path": "src/model_arithmetic/utils.py", "snippet": "ENABLE_LOGGING = False" }, { "identifier": "log", "path": "src/model_arithmetic/utils.py", "snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)" }, { "identifier": "Operator", "path": "src/model_arithmetic/operators.py", "snippet": "class Operator(BaseClass):\n def __init__(self, minimum_value=-10 ** 8, **kwargs):\n \"\"\"Initializes an operator with the given keyword arguments.\n\n Args:\n minimum_value (float, optional): The minimum value any element can have: this is important when doing calculations where several logprobs have been made -torch.inf but we still want to do meaningful computations with them.\n **kwargs: The keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n self.minimum_value = minimum_value\n \n def set_to_minimum(self, output):\n \"\"\"Sets the output to the minimum value if it is smaller than the minimum value.\n\n Args:\n output (List || torch.tensor): List or torch.tensor\n \"\"\"\n if isinstance(output, list):\n for el in range(len(output)):\n if torch.is_tensor(output[el]):\n output[el][output[el] < self.minimum_value] = self.minimum_value\n elif torch.is_tensor(output):\n output[output < self.minimum_value] = self.minimum_value\n return output\n \n def evaluate(self, runnable_operator_outputs : Dict, normalize : bool = True):\n \"\"\"Evaluates the given object in the formula based on the language model outputs\n\n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise 
NotImplementedError\n\n def clone(self):\n \"\"\"Creates a deep copy of the object.\n\n Returns:\n A deep copy of the object.\n \"\"\"\n return copy.deepcopy(self)\n\n def norm(self, runnable_operator_outputs : Dict = None):\n \"\"\"Returns the norm of the object\n \n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n \n def runnable_operators(self):\n \"\"\"Returns the Runnable Operators in the object\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def is_finished(self, runnable_operator_outputs : Dict) -> bool:\n \"\"\"Returns whether the object is finished\n\n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def normalize(self, output, runnable_operator_outputs : Dict):\n \"\"\"\n Normalizes the output of the operator\n \n Args:\n output (torch.tensor || float): The output of the operator\n runnable_operator_outputs (Dict): The outputs of the runnable operators\n \"\"\"\n norm = self.norm(runnable_operator_outputs)\n if (torch.is_tensor(norm) and torch.count_nonzero(norm == 0) > 0) or (not torch.is_tensor(norm) and norm == 0):\n return output\n if not torch.is_tensor(output):\n return output\n output /= norm\n output -= torch.logsumexp(output, dim=-1, keepdim=True)\n return output\n\n\n def __add__(self, other):\n if isinstance(other, (float, int)):\n return Sum([self, Constant(other)])\n return Sum([self, other])\n\n def __radd__(self, other):\n return self.__add__(other)\n \n def __multiply__(self, other):\n if isinstance(other, (float, int)):\n return Product([self, Constant(other)])\n return Product([self, other])\n\n def __div__(self, other):\n if isinstance(other, (float, int)):\n return Product([self, Constant(1 / other)])\n raise NotImplementedError\n\n def __rdiv__(self, other):\n raise NotImplementedError\n\n def __sub__(self, other):\n return self.__add__(-other)\n\n def __neg__(self):\n return self.__multiply__(-1)\n\n def __rmultiply__(self, other):\n return self.__multiply__(other)\n\n def __mul__(self, other):\n return self.__multiply__(other)\n\n def __rmul__(self, other):\n return self.__multiply__(other)\n\n def __rsub__(self, other):\n self_ = self.__neg__()\n return self_.__add__(other)\n \n def __str__(self):\n return f\"{self.__class__.__name__}({self.kwargs})\"" }, { "identifier": "Monitor", "path": "src/model_arithmetic/monitor.py", "snippet": "class Monitor(MultipleMonitor):\n \"\"\"\n Final monitor object that keeps track of values for runnable operators, but also for the whole formula\n \"\"\"\n def __init__(self, runnable_operators):\n \"\"\"\n Initialize the Monitor object.\n \n Args:\n runnable_operators(List[RunnableOperator]): A list of runnable operators.\n \"\"\"\n super().__init__(models_monitor=ModelsMonitor(runnable_operators))\n \n def pop_results(self, n=1, runnable_operator=None, indicator=None):\n \"\"\"Pop results from the monitor.\n\n Args:\n n (int, optional): Number of elements to pop. Defaults to 1.\n runnable_operator (RunnableOperator, optional): From which ModelMonitor to pop the results. Defaults to None.\n indicator (string, optional): Name of the type to pop. 
Defaults to None.\n \"\"\"\n if runnable_operator is None:\n super().pop_results(n, indicator=indicator)\n else:\n self.models_monitor.pop_results(n, runnable_operator, indicator=indicator)\n \n def merge(self, other):\n \"\"\"\n Merge the elements of another Monitor object with the elements of this object.\n Args:\n other (Monitor): The other Monitor object.\n \"\"\"\n super().merge(other)\n self.models_monitor.merge(other.models_monitor)\n \n def add_result(self, element, runnable_operator=None, indicator=None):\n \"\"\"\n Add a result to the monitor.\n Args:\n element (float): The result to be added.\n runnable_operator (RunnableOperator): The runnable operator associated with the result.\n indicator (string, optional): The name of the time type.\n \"\"\"\n if runnable_operator is None:\n super().add_result(element, indicator=indicator)\n else:\n self.models_monitor.add_result(element, runnable_operator, indicator=indicator)\n \n def get_store_settings(self):\n \"\"\"\n Gets the store settings of the parent class and the models monitor.\n \"\"\"\n sum_vals = [monitor.total() for monitor in self.models_monitor.monitors.values()]\n if len(sum_vals) > 0:\n total_time_no_model_calls = self.total() - sum(sum_vals)\n else:\n total_time_no_model_calls = self.total()\n\n return {\n **super().get_store_settings(),\n \"total_time_no_model_calls\": total_time_no_model_calls,\n \"models_monitor\": self.models_monitor.get_store_settings()\n }" }, { "identifier": "RunnableOperator", "path": "src/model_arithmetic/runnable_operators.py", "snippet": "class RunnableOperator(Operator):\n def __init__(self, prompt_string=\"\", model=None, speculative_factor=1, \n prompt_template = lambda prompt_string, input_string: prompt_string + input_string, run_priority=0, group=None, \n outputs_logprobs=True, **kwargs):\n \"\"\"\n Initialize a runnable operator instance. A runnable operator is an operator that generates a probability distribution instead of modifies an existing one.\n \n Args:\n prompt_string (str): String to be used as a prompt. Only used in specific runnable operators\n model (optional): Model to be used for operation. If None, the model must be set later to the default model to be used.\n speculative_factor (int): Factor for speculative sampling.\n prompt_template (callable): Function for generating prompt. Takes two arguments: prompt_string and input_string. The operator will be run on prompt_template(..., ...) + continuation_tokens\n run_priority (int): Priority for running the operation. Higher priority means the operation will be run first, especially important for the classifier.\n group (optional): Group to which the operator belongs. 
This ensures that speculative sampling will not be tried when not all operators of a group are finished.\n outputs_logprobs (bool): Whether the operator outputs logprobs.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n super().__init__(speculative_factor=speculative_factor, model=model, prompt_string=prompt_string,\n prompt_template=prompt_template, run_priority=run_priority, group=group, outputs_logprobs=outputs_logprobs, **kwargs)\n self.cache = None\n \n def run_condition(self, new_tokens, trigger_end):\n \"\"\"\n Determine if the run condition is met.\n \n Args:\n new_tokens (List[int]): Number of new tokens per sample in the batch\n trigger_end (List[bool]): Whether to trigger the end for each sample in the batch.\n \n Returns:\n bool: Whether the run condition is met.\n \"\"\"\n new_tokens = [new_tokens[i] if not trigger_end[i] or new_tokens[i] < 0 else max(new_tokens[i], self.speculative_factor) for i in range(len(new_tokens))]\n return np.mean(new_tokens) >= self.speculative_factor \n # other possibility:\n # return np.max(new_tokens) + 1 >= speculative_factor\n \n def delete_cache(self, index=None, from_=None):\n \"\"\"\n Delete the cache.\n \"\"\"\n if from_ is None and index is None:\n self.cache = None\n \n def run(self, tokenized_inputs, **kwargs):\n \"\"\"\n Run the operation. This method needs to be implemented by subclasses.\n \n Args:\n tokenized_inputs (torch.tensor): Inputs that have been tokenized.\n **kwargs: Arbitrary keyword arguments.\n \n Raises:\n NotImplementedError: This method needs to be implemented by subclasses.\n \"\"\"\n raise NotImplementedError(\"This method needs to be implemented by subclasses.\")\n \n def runnable_operators(self):\n \"\"\"\n Get a list of runnable operators used by the operator, usually only this operator itself.\n \n Returns:\n list: List of runnable operators.\n \"\"\"\n return [self]\n \n def same_operator(self, other):\n \"\"\"\n Determine if the other operator is the same as this one. 
This is important to avoid redundant runs of the same operator in a formula\n \n Args:\n other: Other operator to be compared.\n \n Returns:\n bool: Whether the other operator is the same as this one.\n \"\"\"\n if isinstance(other, str):\n return self.id() == other\n elif isinstance(other, RunnableOperator):\n return self.id() == other.id()\n return False\n\n def norm(self, runnable_operator_outputs=None):\n \"\"\"\n Compute the norm of the operator.\n \n Args:\n runnable_operator_outputs (optional): Outputs of runnable operators.\n \n Returns:\n int: The norm of the operator.\n \"\"\"\n if runnable_operator_outputs is None or self.is_finished(runnable_operator_outputs):\n return 1\n return 0\n \n def is_finished(self, runnable_operator_outputs):\n \"\"\"\n Determine if the operation is finished.\n \n Args:\n runnable_operator_outputs: Outputs of runnable operators.\n \n Returns:\n bool: Whether the operation is finished.\n \"\"\"\n return any([self.same_operator(output) and runnable_operator_outputs[output] is not None for output in runnable_operator_outputs])\n \n def evaluate(self, runnable_operator_outputs : Dict, normalize : bool = True):\n \"\"\"\n Evaluate the operation.\n \n Args:\n runnable_operator_outputs (Dict): Outputs of runnable operators.\n normalize (bool): Whether to normalize the evaluation.\n \n Returns:\n int: The evaluation of the operation.\n \"\"\"\n for output in runnable_operator_outputs:\n if self.same_operator(output) and runnable_operator_outputs[output] is not None:\n return runnable_operator_outputs[output]\n return 0\n \n def generate_settings(self):\n \"\"\"\n Generate settings for the operation.\n \n Returns:\n dict: Settings for the operation.\n \"\"\"\n kwargs = super().generate_settings()\n kwargs[\"prompt_template\"] = self.prompt_template(\"{{prompt_string}}\", \"{{input_string}}\")\n return kwargs\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Load operator from settings.\n \n Args:\n settings (dict): Settings for the operation.\n \n Returns:\n Operator: Operator loaded from settings.\n \"\"\"\n copy = settings[\"prompt_template\"]\n prompt_template = lambda prompt_string, input_string: copy.replace(\"{{prompt_string}}\", prompt_string).replace(\"{{input_string}}\", input_string)\n settings[\"prompt_template\"] = prompt_template\n return Operator.load_from_settings(settings)\n \n def get_prompt(self, input_string):\n \"\"\"\n Get the prompt for the operation.\n \n Args:\n input_string (str): String to be used as input.\n \n Returns:\n callable: Function for generating prompt.\n \"\"\"\n return self.prompt_template(self.prompt_string, input_string)\n \n def get_store_params(self):\n \"\"\"\n Get parameters for storing the operation.\n \n Returns:\n dict: Parameters for storing the operation.\n \"\"\"\n return {\n \"class\": self.__class__.__name__,\n \"model\": self.model,\n \"speculative_factor\": self.speculative_factor,\n \"prompt_template\": self.prompt_template(self.prompt_string, \"{{input_string}}\")\n }\n \n def id(self):\n \"\"\"\n Get the ID of the operation.\n \n Returns:\n str: ID of the operation.\n \"\"\"\n kwargs = self.kwargs.copy()\n kwargs[\"prompt_template\"] = self.prompt_template(self.prompt_string, \"{{input_string}}\")\n return f\"{self.__class__.__name__}(**{kwargs})\"\n \n def load_model(self, dtype):\n \"\"\"\n Load the model for the operation. 
Only needs to be overwritten when a model is necessary\n \n Args:\n dtype: Data type for the model.\n \n Returns:\n None\n \"\"\"\n return None\n \n def initialize_after_model_set(self):\n \"\"\"\n Initialize the operation after the model is set (to the default model if necessary).\n \n Raises:\n AssertionError: If the model is not set before initializing.\n \"\"\"\n assert self.model is not None, \"Model must be set before initializing.\"" }, { "identifier": "PromptedLLM", "path": "src/model_arithmetic/runnable_operators.py", "snippet": "class PromptedLLM(RunnableOperator):\n def __init__(self, prompt_string, model=None, speculative_factor=1, \n prompt_template = lambda prompt_string, input_string, : prompt_string + \"\\n\" + input_string, dtype=None, group=None,\n enable_cache=True, dim_keys_past=2, dim_values_past=2, run_eager=False, tokenizer=None, **kwargs):\n \"\"\"\n Initializes an LLM Prompt. This is a runnable operator that uses a language model to generate a probability distribution.\n Args:\n prompt_string (str): String to be used as a prompt. Only used in specific runnable operators\n model (optional): Model to be used for operation. If None, the model must be set later to the default model to be used.\n speculative_factor (int): Factor for speculative sampling.\n prompt_template (callable): Function for generating prompt. Takes two arguments: prompt_string and input_string. The operator will be run on prompt_template(..., ...) + continuation_tokens\n run_priority (int): Priority for running the operation. Higher priority means the operation will be run first, especially important for the classifier.\n dtype (optional): Data type for the model.\n group (optional): Group to which the operator belongs. This ensures that speculative sampling will not be tried when not all operators of a group are finished.\n enable_cache (bool): Whether to enable the key-value cache.\n dim_keys_past (int): Dimension of the keys in the key-value cache. Usually 2, but for other models this can be different.\n dim_values_past (int): Dimension of the values in the key-value cache. Usually 2, but for other models this can be different.\n run_eager (bool): Whether to run the model in eager mode. This is necessary for some models, but incompatible with speculative sampling and some other features.\n tokenizer (Tokenizer): Tokenizer to be used for the operation. If None, the default tokenizer will be used.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n if dim_keys_past == 2 and dim_values_past == 2:\n # set the dims based on the model\n if model in [\"tiiuae/falcon-7b\", \"tiiuae/falcon-7b-instruct\", \"tiiuae/falcon-40b\", \"tiiuae/falcon-40b-instruct\"]:\n dim_keys_past = 1\n dim_values_past = 1\n \n super().__init__(prompt_string=prompt_string, model=model, speculative_factor=speculative_factor, \n prompt_template=prompt_template, group=group, enable_cache=enable_cache, \n dim_keys_past=dim_keys_past, dim_values_past=dim_values_past, run_eager=run_eager)\n self.dtype = dtype\n self.tokenizer_length = None\n self.tokenizer = tokenizer\n self.previous_input_ids = None\n self.default_dim = 2\n if self.run_eager:\n log(logger.warning, \"Eager mode is enabled. 
This will make several features, such as speculative sampling, inaccessible.\")\n \n def load_model(self, dtype):\n \"\"\"\n Loads the model for the operation.\n :param dtype: Data type for the model.\n \"\"\"\n if not isinstance(self.model, str):\n return self.model\n if self.dtype is None:\n return load_model(self.model, dtype=dtype)\n return load_model(self.model, dtype=self.dtype)\n \n def initialize_after_model_set(self):\n if self.tokenizer is None:\n tokenizer = load_tokenizer(self.model)\n self.tokenizer_length = len(tokenizer)\n \n def select_from_sample_cache(self, sample, from_=None, until=None):\n \"\"\"Selects the cache from a sample that needs to be stored\n\n Args:\n sample (torch.tensor): Torch tensor, the samples key-value past as stored by the LLM\n from_ (int, optional): From which value to store the key-value past. Defaults to None.\n until (int, optional): Until which value to store the key-value past. Defaults to None.\n \"\"\"\n for i in range(len(sample)):\n for j in range(len(sample[i])):\n sample[i][j] = sample[i][j][:, from_:until]\n \n return sample\n \n def swap_dimensions(self, sample):\n \"\"\"Swaps dimensions in order to make the dimensions match the default dimensions. This is necessary because models do not use the same semantics for the key-value storage\n\n Args:\n sample (List[torch.tensor]): Key-value past as stored by the LLM\n \"\"\"\n for i in range(len(sample)):\n # keys, values\n if self.default_dim != self.dim_keys_past:\n sample[i][0] = sample[i][0].transpose(self.default_dim - 1, self.dim_keys_past - 1)\n if self.default_dim != self.dim_values_past:\n sample[i][1] = sample[i][1].transpose(self.default_dim - 1, self.dim_values_past - 1)\n \n return sample\n \n def select_sample_cache(self, cache, sample_index):\n \"\"\"Stores the key value past by selecting the sample index from the cache and storing them in a list\n\n Args:\n cache (List[torch.tensor]): Key-value cache as returned by the model\n sample_index (int): Which sample to select\n \"\"\"\n sample = []\n for i in range(len(cache)):\n sample.append([\n cache[i][0][sample_index],\n cache[i][1][sample_index]\n ])\n sample = self.swap_dimensions(sample)\n return sample\n \n def pad_sample(self, sample, target_size):\n \"\"\"Pads all samples key-value cache to a specific size\n\n Args:\n sample (torch.tensor): Key-value cache as stored by the LLM\n target_size (int): Target size\n \"\"\"\n for i in range(len(sample)):\n for j in range(len(sample[i])):\n pad_size = target_size - sample[i][j].size(1)\n pad = (0, 0, pad_size, 0)\n if pad_size > 0:\n sample[i][j] = torch.nn.functional.pad(sample[i][j], pad, \"constant\", 0)\n elif pad_size < 0:\n sample[i][j] = sample[i][j][:, :target_size]\n return sample\n \n def stack_samples(self, samples):\n \"\"\"Stacks the samples key-value cache by removing the List dimension and reordering to be appropriate for storing\n\n Args:\n samples (List[torch.tensor]): Key-value cache as returend by the model\n \"\"\"\n stacked_samples = []\n for i in range(len(samples[0])):\n stacked_mult = []\n for j in range(len(samples[0][i])):\n stacked = torch.stack(\n [samples[k][i][j] for k in range(len(samples))], dim=0\n )\n stacked_mult.append(stacked)\n stacked_samples.append(stacked_mult)\n return stacked_samples\n \n def store_cache(self, past_key_values, input_ids, lengths):\n \"\"\"Stores the past key values returned by the model in an appropriate way\n\n Args:\n past_key_values (List[torch.tensor]): Tensor in which the key values where reutrned\n input_ids 
(torch.tensor): Input ids\n lengths (List[int]): Length of each sample in the batch\n \"\"\"\n if self.run_eager:\n self.cache = past_key_values\n return\n self.cache = []\n self.previous_input_ids = []\n for i, length in enumerate(lengths):\n self.cache.append(\n self.select_from_sample_cache(self.select_sample_cache(past_key_values, i), from_=-length)\n )\n self.previous_input_ids.append(\n input_ids[i, -length:]\n )\n def common_starting_elements(self, t1, t2):\n \"\"\"Check for the common starting elements in two tensors\n\n Args:\n t1 (torch.tensor): First Tensor\n t2 (torch.tensor): Second Tensor\n \"\"\"\n min_length = min(t1.size(0), t2.size(0))\n eq = torch.eq(t1[:min_length], t2[:min_length])\n if not eq.any():\n return 0\n if eq.all():\n return min_length\n\n return torch.where(eq == 0)[0][0].item()\n \n def delete_previous_cache(self, new_input_ids, lengths):\n \"\"\"Deletes previous cache by only keeping the common elements between the previous input ids and the new input ids\n\n Args:\n new_input_ids (torch.tensor): New input ids\n lengths (List[int]): List of lengths\n \"\"\"\n if self.run_eager:\n return\n input_ids = [\n new_input_ids[i, -lengths[i]:] for i in range(len(lengths))\n ]\n elements = [self.common_starting_elements(input_ids[i], self.previous_input_ids[i]) for i in range(len(lengths))]\n self.cache = [\n self.select_from_sample_cache(self.cache[i], until=elements[i]) for i in range(len(lengths))\n ]\n \n \n def prepare_inputs(self, input_ids, attention_mask, n_new_tokens):\n \"\"\"Prepares the inputs for the model\n\n Args:\n input_ids (torch.tensor): Input ids\n attention_mask (torch.tensor): Attention Mask\n n_new_tokens (int): Number of new tokens since last run\n \"\"\"\n max_new_tokens = max(n_new_tokens)\n past_key_values = None\n if self.cache is not None and self.enable_cache:\n input_ids = input_ids[:, -max_new_tokens:]\n if self.run_eager:\n past_key_values = self.cache\n else:\n past_key_values = self.pad_cache(\n [self.select_from_sample_cache(self.cache[i], until=-max_new_tokens + n_new_tokens[i]) if max_new_tokens > n_new_tokens[i] else self.cache[i]\n for i in range(len(n_new_tokens))],\n attention_mask.shape[1] - max_new_tokens\n )\n return {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": True,\n \"past_key_values\": past_key_values\n }\n \n def pad_cache(self, cache, length):\n \"\"\"Pads the cache and prepares them for the model\n\n Args:\n cache (torch.tensor): Key-value cache as stored by the LLM\n lengths (List[int]): List of lengths\n \"\"\"\n for i in range(len(cache)):\n cache[i] = self.pad_sample(cache[i], length)\n cache[i] = self.swap_dimensions(cache[i])\n stacked_samples = self.stack_samples(cache)\n\n return stacked_samples\n \n def delete_cache(self, index=None, from_=None):\n \"\"\"Deletes all cache\n\n Args:\n index (int, optional): _description_. Defaults to None.\n from_ (int, optional): _description_. 
Defaults to None.\n \"\"\"\n # if index is not None and self.cache is not None:\n # self.previous_input_ids = self.previous_input_ids[:index] + self.previous_input_ids[index + 1:]\n # cache_shape = list(self.cache[0].shape)\n # device = self.cache[0].device\n # dtype = self.cache[0].dtype\n # cache_shape[-2] = 0\n # self.cache = self.cache[:index] + self.cache[index + 1:]\n # self.previous_input_ids.append(torch.tensor([]))\n # self.cache.append(torch.tensor([], device=device, dtype=dtype).reshape(cache_shape))\n # return\n # else:\n self.previous_input_ids = None\n self.cache = None\n\n def run(self, tokenized_inputs, loaded_models, model_new_tokens, use_cache, **kwargs):\n \"\"\"\n Runs the model on the tokenized inputs.\n Args:\n tokenized_inputs (torch.tensor): Inputs that have been tokenized.\n loaded_models (dict[PreTrainedModel]): Models that have been loaded. The model for this operation is in loaded_models[self.model]\n model_new_tokens (List[int]): Number of new tokens per sample in the batch\n use_cache (bool): Whether to use the key-value cache.\n \"\"\"\n if isinstance(self.model, str):\n model = loaded_models[self.model]\n else:\n model = self.model\n lengths = torch.sum(tokenized_inputs.attention_mask, dim=-1)\n if self.cache is not None and self.enable_cache and use_cache:\n self.delete_previous_cache(tokenized_inputs.input_ids, lengths)\n \n # if self.cache is not None:\n # length_common_input_ids_per_sample = [\n \n # ]\n actual_inputs = self.prepare_inputs(input_ids=tokenized_inputs.input_ids.to(model.device),\n attention_mask=tokenized_inputs.attention_mask.to(model.device),\n n_new_tokens=model_new_tokens)\n # run model \n with torch.no_grad():\n try:\n model_output = model(**actual_inputs, return_dict=True)\n except RuntimeError as e:\n raise RuntimeError(f\"Error thrown when running model. This is probably caused because the model handles the key-value cache differently. Consider setting dim_values_past and dim_keys_past values or disabling the key-value cache. Alternatively, you can set run_eager=True, but this feature is incompatible with speculative sampling and some other features.\")\n logprobs = torch.log_softmax(model_output.logits[:, :, :self.tokenizer_length], dim=-1)\n \n if self.enable_cache and use_cache:\n self.store_cache(model_output.past_key_values, tokenized_inputs.input_ids, lengths)\n \n logprobs = [logprobs[i, -model_new_tokens[i] : ].to(torch.float32) for i in range(logprobs.shape[0])]\n return logprobs\n\n def __str__(self):\n return f\"PromptedLLM('{self.prompt_string}', model='{self.model}')\"" }, { "identifier": "TokenizedInput", "path": "src/model_arithmetic/input.py", "snippet": "class TokenizedInput:\n \"\"\"\n Keeps track of the tokenized input of a runnable operator. 
Automatically sets the correct tokens, by using the runnable operator's get_prompt method.\n \"\"\"\n def __init__(self, runnable_operator, model_name, model_config, tokenizer):\n \"\"\"\n Initialize the TokenizedInput object.\n\n Args:\n runnable_operator (RunnableOperator): An object that provides a get_prompt method.\n model_name (str): The name of the model.\n model_config (object): The configuration of the model.\n tokenizer (object): The tokenizer to be used.\n \"\"\"\n self.runnable_operator = runnable_operator\n self.input_tokens = []\n self.only_input_tokens = None\n self.tokenizer = tokenizer\n self.max_length = get_max_length(model_config)\n self.set_inputs([\"\"])\n # this is essentially what huggingface also does, but it is kinda hidden in their sample code (GenerationMixin.generate)\n self.tokenizer.padding_side = \"left\"\n \n def extend_batch_size(self, batch_size):\n \"\"\"\n Extend the size of the batch to the given size. If the current size is less than the given size, \n the first element is repeated to fill the batch.\n \n Necessary for compatibility with lm_eval\n\n Args:\n batch_size (int): The desired batch size.\n \"\"\"\n if len(self.input_tokens) != batch_size:\n self.input_tokens = [self.input_tokens[0]] * batch_size\n \n def set_inputs(self, inputs):\n \"\"\"\n Set the inputs for the TokenizedInput object.\n\n Args:\n inputs (list): A list of input strings.\n \"\"\"\n self.input_tokens = [self.runnable_operator.get_prompt(input_string) for input_string in inputs]\n bos_token = \"\"\n if self.tokenizer.bos_token_id is not None:\n self.input_tokens = [\n [self.tokenizer.bos_token_id] + self.tokenizer(input_string, truncation=True, max_length=self.max_length, add_special_tokens=False).input_ids\n for input_string in self.input_tokens\n ]\n bos_token = self.tokenizer.bos_token\n else:\n self.input_tokens = [\n self.tokenizer(input_string, truncation=True, max_length=self.max_length, add_special_tokens=False).input_ids\n for input_string in self.input_tokens\n ]\n \n only_prompt = [bos_token + self.runnable_operator.get_prompt(\"\")]\n self.only_input_tokens = self.tokenizer(only_prompt, padding=True, return_tensors=\"pt\", truncation=True, max_length=self.max_length, add_special_tokens=False)\n \n if \"token_type_ids\" in self.only_input_tokens:\n del self.only_input_tokens[\"token_type_ids\"]\n \n def get_only_input_tokens(self):\n \"\"\"\n Get the input tokens without any continuation tokens.\n\n Returns:\n object: The input tokens without any continuation tokens.\n \"\"\"\n return self.only_input_tokens\n \n def add_continuation_tokens(self, tokens):\n \"\"\"\n Add continuation tokens to the input tokens.\n\n Args:\n tokens (list): A list of continuation tokens.\n\n Returns:\n object: The input tokens with the continuation tokens added.\n \"\"\"\n output = [\n input_token + token for input_token, token in zip(self.input_tokens, tokens)\n ]\n truncated_output = [\n output[:self.max_length] for output in output\n ]\n padded_output = self.tokenizer.pad({\"input_ids\": truncated_output}, padding=True, return_tensors=\"pt\")\n return padded_output" }, { "identifier": "Compatibility", "path": "src/model_arithmetic/lm_eval_compatibility.py", "snippet": "class Compatibility:\n \"\"\"Compatibility class to allow the use of LM eval. Main compatibility issue is that lm eval does not allow to distinguish between the input tokens and the continuation tokens. 
This class fixes this manually by going\n through the task inputs and finding the one that matches the input tokens.\n \"\"\"\n def __init__(\n self,\n task_name,\n needs_input_tokens_lm_eval,\n tokenizer,\n device,\n max_length,\n ): \n \n \"\"\"Initializes the compatibility class.\n \n Args:\n task_name (str): Name of the task.\n needs_input_tokens_lm_eval (bool): Whether the task needs the input tokens or not. If it does, the program will try to find the input tokens in the task inputs.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase): Tokenizer to be used.\n device (torch.device): Device to be used.\n max_length (int): Maximum length of the input tokens.\n \"\"\"\n self.task_name = task_name\n self.needs_input_tokens_lm_eval = needs_input_tokens_lm_eval\n self.tokenizer = tokenizer\n self.task_inputs = []\n self.device = device\n self.task_initialized = False\n self.max_length = max_length\n \n def initialize_task(self):\n \"\"\"Initializes the task. Looks up all the task inputs and stores them in a list. Gets encoded inputs along with the input length\n \"\"\"\n if self.task_initialized:\n return\n self.task_initialized = True\n self.task_inputs = []\n task = get_task(self.task_name)()\n \n if task.has_test_docs():\n task_doc_func = task.test_docs\n elif task.has_validation_docs():\n task_doc_func = task.validation_docs\n \n dataset = pd.DataFrame(task_doc_func())\n rnd = random.Random()\n rnd.seed(42)\n list_indices = list(range(len(dataset)))\n rnd.shuffle(list_indices)\n dataset = dataset.iloc[list_indices]\n # rnd.shuffle(dataset)\n \n for index in range(len(dataset)):\n doc = dict(dataset.iloc[index])\n ctx = task.fewshot_context(\n doc=doc, num_fewshot=0, rnd=rnd, description=\"\"\n )\n requests = task.construct_requests(doc, ctx)\n input_ = task.doc_to_text(doc)\n input_encoded = self.tokenizer(input_, return_tensors=\"pt\", truncation=True, max_length=self.max_length).input_ids[0]\n for request in requests:\n task_input = self.tokenizer(\"\".join(request.args), return_tensors=\"pt\", truncation=True, max_length=self.max_length).input_ids.to(self.device)[0]\n task_input_length = len(input_encoded)\n # double encoding decoding is necessary for the llama tokenizer (for example, a \"...\" got an extra space in front of it if you don't do this)\n self.task_inputs.append((task_input, len(task_input) - task_input_length, self.tokenizer.decode(task_input[:-1])))\n \n def is_target(self, input_tokens, task_input):\n \"\"\"Checks whether the input tokens are the target tokens starting from the end of the input tokens.\n\n Args:\n input_tokens (torch.tensor): Input tokens\n task_input (torch.tensor): Task Input Tokens\n \"\"\"\n return torch.all(input_tokens[-len(task_input):] == task_input)\n \n def find_in_task(self, input_tokens):\n \"\"\"Finds the input tokens in the task inputs. 
First does an exact match and then a fuzzy match if the exact match came up empty .\n\n Args:\n input_tokens (torch.tensor): Input Tokens\n \"\"\"\n if not self.task_initialized:\n self.initialize_task()\n \n decoded = self.tokenizer.decode(input_tokens)\n for i in range(len(self.task_inputs)):\n guess = self.task_inputs[i][2]\n if guess in decoded:\n return self.task_inputs[i]\n fuzzes = []\n for i in range(len(self.task_inputs)):\n guess = self.task_inputs[i][2]\n fuzzes.append(fuzz.partial_ratio(guess, decoded))\n\n return self.task_inputs[fuzzes.index(max(fuzzes))]\n \n def forward_preprocessing(self, input_ids, model_input_tokens, **kwargs):\n \"\"\"Implements the main preprocessing step. This is necessary to be able to use lm-evaluation-harness. This function finds the input tokens in the task inputs and then extends the batch size of the model input tokens\n\n Args:\n input_ids (torch.tensor): Input ids\n model_input_tokens (Input): Input classes to be used for the various models in the Model Arithmetic class\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n if not isinstance(input_ids, list):\n continuation_tokens = input_ids.tolist()\n else:\n continuation_tokens = input_ids\n \n # necessary for no context\n if self.needs_input_tokens_lm_eval and get_task is not None:\n inputs = []\n continuation_tokens = []\n for i in range(len(input_ids)):\n task_element = self.find_in_task(input_ids[i])\n if task_element[1] > 1:\n inputs.append(self.tokenizer.decode(input_ids[i][:-task_element[1] + 1]))\n continuation_tokens.append(input_ids[i][-task_element[1] + 1:].tolist())\n else:\n inputs.append(self.tokenizer.decode(input_ids[i]))\n continuation_tokens.append([])\n \n for runnable_operator_id in model_input_tokens:\n model_input_tokens[runnable_operator_id].extend_batch_size(len(continuation_tokens))\n model_input_tokens[runnable_operator_id].set_inputs(inputs)\n else: \n for runnable_operator_id in model_input_tokens:\n model_input_tokens[runnable_operator_id].extend_batch_size(len(continuation_tokens))\n \n return continuation_tokens\n \n def forward_post_processing(self, logprobs, input_shape):\n \"\"\"Does some small post processing steps to make sure the correct shape is returned for the logprobs.\n\n Args:\n logprobs (torch.tensor): Returned logprobs\n input_shape (torch.tensor): The shape of the input tokens\n \"\"\"\n if self.needs_input_tokens_lm_eval:\n if torch.is_tensor(logprobs) and len(logprobs.shape) == 3 and logprobs.shape[1] != input_shape[1]:\n # set the output to the correct shape, by adding zeros in the beggining in the first axis\n logprobs = torch.cat([torch.zeros((logprobs.shape[0], input_shape[1] - logprobs.shape[1], logprobs.shape[2]), device=logprobs.device), logprobs], dim=1)\n \n return logprobs" } ]
from transformers import PreTrainedModel
from .basic_model_loader import load_model, load_tokenizer
from .utils import get_max_length, ENABLE_LOGGING, log
from collections import namedtuple
from transformers import top_k_top_p_filtering
from loguru import logger
from .operators import Operator
from .monitor import Monitor
from .runnable_operators import RunnableOperator, PromptedLLM
from .input import TokenizedInput
from .lm_eval_compatibility import Compatibility
import json
import numpy as np
import torch
import os
import time
import random
11440
class ModelArithmetic(PreTrainedModel):
    """
    Main class for prompt arithmetic. Handles the generation of text based on the formula.
    """
    SAVE_FILE = "prompt_arithmetic.json"
    _supports_sdpa = True

    def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None):
        """Initializes the prompt arithmetic model.

        Args:
            formula (Operator): The formula for which generations need to be made.
            default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None.
            dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16.
            intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False.
            epsilon (float, optional): Just some small value. Defaults to 1e-12.
            retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to [].
            calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True.
            needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task.
            lm_eval_task (str, optional): Name of the lm eval task. Defaults to None.
            tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None.
        """
        self.formula = formula.clone()
        self.default_model = default_model
        self.loaded_models = dict()
        self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing)
        self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn
        self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator
        self.output_type = namedtuple("ModelArithmeticOutput", ["logits", "logprobs_per_model"])
        self.intermediate_argmax = intermediate_argmax
        self.retroactive_operators = retroactive_operators
        self.calculate_statistics = calculate_statistics

        self.runnable_operators = []
        for runnable_operator in self.formula.runnable_operators():
            if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]):
                self.runnable_operators.append(runnable_operator)

        # sort the prompts by speculative factor, putting the one with highest speculative factor first
        # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones
        # however, we first need to sort by run_priority and then within that by speculative factor
        self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True)

        self.load_all_models(dtype=dtype)
        if self.default_model not in self.loaded_models:
            for runnable_operator in self.runnable_operators:
                if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:
                    self.default_model = runnable_operator.model
                    break
            if self.default_model is None:
                raise ValueError("Default model must be specified if not specified in an llm prompt")

        self.config = self.loaded_models[str(self.default_model)].config

        if tokenizer is None:
            self.tokenizer = load_tokenizer(self.default_model)
        else:
            self.tokenizer = tokenizer

        self.init_runnable_operators()

        self.model_input_tokens = {
class ModelArithmetic(PreTrainedModel):
    """
    Main class for prompt arithmetic. Handles the generation of text based on the formula.
    """
    SAVE_FILE = "prompt_arithmetic.json"
    _supports_sdpa = True

    def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None):
        """Initializes the prompt arithmetic model.

        Args:
            formula (Operator): The formula for which generations need to be made.
            default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None.
            dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16.
            intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False.
            epsilon (float, optional): Just some small value. Defaults to 1e-12.
            retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to [].
            calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True.
            needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task.
            lm_eval_task (str, optional): Name of the lm eval task. Defaults to None.
            tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None.
        """
        self.formula = formula.clone()
        self.default_model = default_model
        self.loaded_models = dict()
        self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing)
        self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn
        self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator
        self.output_type = namedtuple("ModelArithmeticOutput", ["logits", "logprobs_per_model"])
        self.intermediate_argmax = intermediate_argmax
        self.retroactive_operators = retroactive_operators
        self.calculate_statistics = calculate_statistics

        self.runnable_operators = []
        for runnable_operator in self.formula.runnable_operators():
            if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]):
                self.runnable_operators.append(runnable_operator)

        # sort the prompts by speculative factor, putting the one with highest speculative factor first
        # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones
        # however, we first need to sort by run_priority and then within that by speculative factor
        self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True)

        self.load_all_models(dtype=dtype)
        if self.default_model not in self.loaded_models:
            for runnable_operator in self.runnable_operators:
                if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:
                    self.default_model = runnable_operator.model
                    break
            if self.default_model is None:
                raise ValueError("Default model must be specified if not specified in an llm prompt")

        self.config = self.loaded_models[str(self.default_model)].config

        if tokenizer is None:
            self.tokenizer = load_tokenizer(self.default_model)
        else:
            self.tokenizer = tokenizer

        self.init_runnable_operators()

        self.model_input_tokens = {
runnable_operator.id(): TokenizedInput(runnable_operator,
9
2023-11-21 20:01:08+00:00
16k
huang-yh/SelfOcc
model/encoder/tpvformer/tpvformer_encoder.py
[ { "identifier": "BaseEncoder", "path": "model/encoder/base_encoder.py", "snippet": "class BaseEncoder(BaseModule):\n \"\"\"Further encode 3D representations.\n image backbone -> neck -> lifter -> encoder -> segmentor\n \"\"\"\n\n def __init__(self, init_cfg=None, **kwargs):\n super().__init__(init_cfg)\n \n def forward(\n self, \n representation,\n ms_img_feats=None,\n metas=None,\n **kwargs\n ):\n pass" }, { "identifier": "point_sampling", "path": "model/encoder/bevformer/utils.py", "snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef point_sampling(reference_points, img_metas):\n reference_points = reference_points.float()\n\n lidar2img = []\n for img_meta in img_metas:\n lidar2img.append(img_meta['lidar2img'])\n if isinstance(lidar2img[0], (np.ndarray, list)):\n lidar2img = np.asarray(lidar2img)\n lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4)\n else:\n lidar2img = torch.stack(lidar2img, dim=0)\n\n reference_points = torch.cat(\n (reference_points, torch.ones_like(reference_points[..., :1])), -1)\n\n reference_points = reference_points.permute(1, 0, 2, 3)\n D, B, num_query = reference_points.size()[:3]\n num_cam = lidar2img.size(1)\n\n reference_points = reference_points.view(\n D, B, 1, num_query, 4, 1)\n\n lidar2img = lidar2img.view(\n 1, B, num_cam, 1, 4, 4)\n\n reference_points_cam = torch.matmul(\n lidar2img.to(torch.float32),\n reference_points.to(torch.float32)).squeeze(-1)\n \n eps = 1e-5\n\n # reference_points_cam[..., 0:2] = reference_points_cam[..., 0:2] * \\\n # img_metas[0]['scale_rate']\n \n if 'img_augmentation' in img_metas[0] and \\\n 'post_rots' in img_metas[0]['img_augmentation'] and \\\n 'post_trans' in img_metas[0]['img_augmentation']:\n post_rots = []\n post_trans = []\n for img_meta in img_metas:\n post_rots.append(img_meta['img_augmentation']['post_rots'].numpy())\n post_trans.append(img_meta['img_augmentation']['post_trans'].numpy())\n post_rots = np.asarray(post_rots)\n post_trans = np.asarray(post_trans)\n post_rots = reference_points.new_tensor(post_rots)\n post_trans = reference_points.new_tensor(post_trans)\n\n reference_points_cam[..., :2] = reference_points_cam[..., :2] / torch.maximum(\n reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)\n \n # D, B, N, Q, 3, 1\n reference_points_cam = reference_points_cam[..., :3].unsqueeze(-1)\n post_rots = post_rots.view(1, B, num_cam, 1, 3, 3)\n reference_points_cam = torch.matmul(\n post_rots.to(torch.float32),\n reference_points_cam.to(torch.float32)).squeeze(-1)\n # D, B, N, Q, 3\n post_trans = post_trans.view(1, B, num_cam, 1, 3)\n reference_points_cam = reference_points_cam + post_trans\n tpv_mask = (reference_points_cam[..., 2:3] > eps) \n reference_points_cam = reference_points_cam[..., :2]\n else:\n tpv_mask = (reference_points_cam[..., 2:3] > eps)\n reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum(\n reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)\n\n # reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1]\n # reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0]\n\n reference_points_cam[..., 0] /= img_metas[0]['img_shape'][1]\n reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0] # D, B, N, Q, 2\n\n tpv_mask = (tpv_mask & (reference_points_cam[..., 1:2] > 0.0)\n & (reference_points_cam[..., 1:2] < 1.0)\n & (reference_points_cam[..., 0:1] < 1.0)\n & (reference_points_cam[..., 0:1] > 0.0))\n\n tpv_mask = torch.nan_to_num(tpv_mask)\n\n reference_points_cam = 
reference_points_cam.permute(2, 1, 3, 0, 4) # N, B, Q, D, 2\n tpv_mask = tpv_mask.permute(2, 1, 3, 0, 4).squeeze(-1)\n\n if 'focal_ratios_x' in img_metas[0]:\n scales_x = np.asarray(img_metas[0]['focal_ratios_x'])\n scales_x = reference_points.new_tensor(scales_x).view(-1, 1, 1, 1, 1)\n reference_points_cam[..., :1] = reference_points_cam[..., :1] * scales_x\n scales_y = np.asarray(img_metas[0]['focal_ratios_y'])\n scales_y = reference_points.new_tensor(scales_y).view(-1, 1, 1, 1, 1)\n reference_points_cam[..., 1:] = reference_points_cam[..., 1:] * scales_y\n\n return reference_points_cam, tpv_mask" }, { "identifier": "get_cross_view_ref_points", "path": "model/encoder/tpvformer/utils.py", "snippet": "def get_cross_view_ref_points(tpv_h, tpv_w, tpv_z, num_points_in_pillar, offset=0):\n # ref points generating target: (#query)hw+zh+wz, (#level)3, #p, 2\n # generate points for hw and level 1\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n h_ranges = h_ranges.unsqueeze(-1).expand(-1, tpv_w).flatten()\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_h, -1).flatten()\n hw_hw = torch.stack([w_ranges, h_ranges], dim=-1) # hw, 2\n hw_hw = hw_hw.unsqueeze(1).expand(-1, num_points_in_pillar[2], -1) # hw, #p, 2\n # generate points for hw and level 2\n z_ranges = torch.linspace(offset, tpv_z-1+offset, num_points_in_pillar[2]) / tpv_z # #p\n z_ranges = z_ranges.unsqueeze(0).expand(tpv_h*tpv_w, -1) # hw, #p\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(-1, 1, 1).expand(-1, tpv_w, num_points_in_pillar[2]).flatten(0, 1)\n hw_zh = torch.stack([h_ranges, z_ranges], dim=-1) # hw, #p, 2\n # generate points for hw and level 3\n z_ranges = torch.linspace(offset, tpv_z-1+offset, num_points_in_pillar[2]) / tpv_z # #p\n z_ranges = z_ranges.unsqueeze(0).expand(tpv_h*tpv_w, -1) # hw, #p\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(1, -1, 1).expand(tpv_h, -1, num_points_in_pillar[2]).flatten(0, 1)\n hw_wz = torch.stack([z_ranges, w_ranges], dim=-1) # hw, #p, 2\n \n # generate points for zh and level 1\n w_ranges = torch.linspace(offset, tpv_w-1+offset, num_points_in_pillar[1]) / tpv_w\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_z*tpv_h, -1)\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(1, -1, 1).expand(tpv_z, -1, num_points_in_pillar[1]).flatten(0, 1)\n zh_hw = torch.stack([w_ranges, h_ranges], dim=-1)\n # generate points for zh and level 2\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(-1, 1, 1).expand(-1, tpv_h, num_points_in_pillar[1]).flatten(0, 1)\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(1, -1, 1).expand(tpv_z, -1, num_points_in_pillar[1]).flatten(0, 1)\n zh_zh = torch.stack([h_ranges, z_ranges], dim=-1) # zh, #p, 2\n # generate points for zh and level 3\n w_ranges = torch.linspace(offset, tpv_w-1+offset, num_points_in_pillar[1]) / tpv_w\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_z*tpv_h, -1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(-1, 1, 1).expand(-1, tpv_h, num_points_in_pillar[1]).flatten(0, 1)\n zh_wz = torch.stack([z_ranges, w_ranges], dim=-1)\n\n # generate points for wz and level 1\n h_ranges = torch.linspace(offset, tpv_h-1+offset, num_points_in_pillar[0]) / tpv_h\n h_ranges = 
h_ranges.unsqueeze(0).expand(tpv_w*tpv_z, -1)\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(-1, 1, 1).expand(-1, tpv_z, num_points_in_pillar[0]).flatten(0, 1)\n wz_hw = torch.stack([w_ranges, h_ranges], dim=-1)\n # generate points for wz and level 2\n h_ranges = torch.linspace(offset, tpv_h-1+offset, num_points_in_pillar[0]) / tpv_h\n h_ranges = h_ranges.unsqueeze(0).expand(tpv_w*tpv_z, -1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(1, -1, 1).expand(tpv_w, -1, num_points_in_pillar[0]).flatten(0, 1)\n wz_zh = torch.stack([h_ranges, z_ranges], dim=-1)\n # generate points for wz and level 3\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(-1, 1, 1).expand(-1, tpv_z, num_points_in_pillar[0]).flatten(0, 1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(1, -1, 1).expand(tpv_w, -1, num_points_in_pillar[0]).flatten(0, 1)\n wz_wz = torch.stack([z_ranges, w_ranges], dim=-1)\n\n reference_points = torch.cat([\n torch.stack([hw_hw, hw_zh, hw_wz], dim=1),\n torch.stack([zh_hw, zh_zh, zh_wz], dim=1),\n torch.stack([wz_hw, wz_zh, wz_wz], dim=1)\n ], dim=0) # hw+zh+wz, 3, #p, 2\n \n return reference_points" }, { "identifier": "GridMeterMapping", "path": "model/encoder/bevformer/mappings.py", "snippet": "class GridMeterMapping:\n\n def __init__(\n self,\n nonlinear_mode: Literal['linear_upscale', 'linear'] = 'linear_upscale',\n h_size=[128, 32],\n h_range=[51.2, 28.8],\n h_half=False,\n w_size=[128, 32],\n w_range=[51.2, 28.8],\n w_half=False,\n d_size=[20, 10],\n d_range=[-4.0, 4.0, 12.0]\n ) -> None:\n self.nonlinear_mode = nonlinear_mode\n if nonlinear_mode == 'linear_upscale':\n assert all([h == w for h, w in zip(h_size, w_size)])\n assert all([h == w for h, w in zip(h_range, w_range)])\n assert (not h_half) and (not w_half)\n self.mapping = NonLinearMapping(\n h_size[0],\n h_size[1],\n h_range[0],\n h_range[1],\n d_size[0],\n d_size[1],\n d_range)\n self.size_h = self.size_w = self.mapping.bev_size\n self.size_d = self.mapping.z_size\n elif nonlinear_mode == 'linear':\n self.mapping = LinearMapping(\n h_size,\n h_range,\n h_half,\n w_size,\n w_range,\n w_half,\n d_size,\n d_range)\n self.size_h = self.mapping.h_tot_len\n self.size_w = self.mapping.w_tot_len\n self.size_d = self.mapping.d_tot_len\n self.grid2meter = self.mapping.grid2meter\n self.meter2grid = self.mapping.meter2grid" }, { "identifier": "BEVCrossAttention", "path": "model/encoder/bevformer/attention/image_cross_attention.py", "snippet": "class BEVCrossAttention(BaseModule):\r\n \"\"\"\r\n Image cross-attention in TPVFormer. 
Enable every tpv query to interact with its corresponding \r\n area on the image feature plane.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_cams=6,\r\n dropout=0.1,\r\n init_cfg=None,\r\n batch_first=True,\r\n deformable_attention=dict(\r\n type='MSDeformableAttention3D',\r\n embed_dims=256,\r\n num_levels=4),\r\n **kwargs):\r\n super().__init__(init_cfg)\r\n\r\n self.init_cfg = init_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.deformable_attention = build_attention(deformable_attention)\r\n self.embed_dims = embed_dims\r\n self.num_cams = num_cams\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.batch_first = batch_first\r\n self.init_weight()\r\n\r\n def init_weight(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n\r\n # @force_fp32(apply_to=('query', 'key', 'value', 'reference_points_cams'))\r\n # @torch.cuda.amp.autocast(enabled=False)\r\n def forward(self,\r\n query,\r\n key,\r\n value,\r\n residual=None,\r\n spatial_shapes=None,\r\n reference_points_cams=None,\r\n bev_masks=None,\r\n level_start_index=None,\r\n **kwargs):\r\n \"\"\"Forward Function of Detr3DCrossAtten.\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (bs, num_query, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n (bs, num_key, embed_dims).\r\n value (Tensor): The value tensor with shape\r\n (bs, num_key, embed_dims).\r\n residual (Tensor): The tensor used for addition, with the\r\n same shape as `x`. Default None. If None, `x` will be used.\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different level. With shape (num_levels, 2),\r\n last dimension represent (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape (num_levels) and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n if key is None:\r\n key = query\r\n if value is None:\r\n value = key\r\n\r\n if residual is None:\r\n residual = query \r\n bs, num_query, _ = query.size()\r\n\r\n slots = torch.zeros_like(query)\r\n # indexeses = []\r\n # max_lens = []\r\n # queries_rebatches = []\r\n # reference_points_rebatches = []\r\n # for tpv_idx, tpv_mask in enumerate(tpv_masks):\r\n indexes = []\r\n for _, mask_per_img in enumerate(bev_masks):\r\n index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1)\r\n indexes.append(index_query_per_img)\r\n max_len = max([len(each) for each in indexes])\r\n # max_lens.append(max_len)\r\n # indexeses.append(indexes)\r\n\r\n reference_points_cam = reference_points_cams\r\n D = reference_points_cam.size(3)\r\n\r\n queries_rebatch = query.new_zeros(\r\n [bs * self.num_cams, max_len, self.embed_dims])\r\n reference_points_rebatch = reference_points_cam.new_zeros(\r\n [bs * self.num_cams, max_len, D, 2])\r\n\r\n for i, reference_points_per_img in enumerate(reference_points_cam):\r\n for j in range(bs):\r\n index_query_per_img = indexes[i]\r\n queries_rebatch[j * self.num_cams + i, :len(index_query_per_img)] = query[j, index_query_per_img]\r\n reference_points_rebatch[j * self.num_cams + i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img]\r\n \r\n # queries_rebatches.append(queries_rebatch)\r\n # reference_points_rebatches.append(reference_points_rebatch)\r\n\r\n num_cams, l, bs, embed_dims = key.shape\r\n\r\n key = key.permute(2, 0, 1, 3).reshape(\r\n self.num_cams * 
bs, l, self.embed_dims)\r\n value = value.permute(2, 0, 1, 3).reshape(\r\n self.num_cams * bs, l, self.embed_dims)\r\n\r\n query = self.deformable_attention(\r\n query=queries_rebatch, key=key, value=value,\r\n reference_points=reference_points_rebatch, \r\n spatial_shapes=spatial_shapes,\r\n level_start_index=level_start_index,)\r\n \r\n # for tpv_idx, indexes in enumerate(indexeses):\r\n for i, index_query_per_img in enumerate(indexes):\r\n for j in range(bs):\r\n slots[j, index_query_per_img] += query[j * self.num_cams + i, :len(index_query_per_img)]\r\n\r\n count = bev_masks.sum(-1) > 0\r\n count = count.permute(1, 2, 0).sum(-1)\r\n count = torch.clamp(count, min=1.0)\r\n slots = slots / count[..., None]\r\n slots = self.output_proj(slots)\r\n\r\n return self.dropout(slots) + residual\r" }, { "identifier": "BEVDeformableAttention", "path": "model/encoder/bevformer/attention/image_cross_attention.py", "snippet": "class BEVDeformableAttention(BaseModule):\r\n \"\"\"An attention module used in Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 8.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n value_proj_ratio (float): The expansion ratio of value_proj.\r\n Default: 1.0.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims: int = 256,\r\n num_heads: int = 8,\r\n num_levels: int = 4,\r\n num_points: int = 4,\r\n im2col_step: int = 64,\r\n dropout: float = 0.1,\r\n batch_first: bool = False,\r\n norm_cfg: Optional[dict] = None,\r\n init_cfg: Optional[mmengine.ConfigDict] = None,\r\n value_proj_ratio: float = 1.0):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.batch_first = batch_first\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = 
nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n value_proj_size = int(embed_dims * value_proj_ratio)\r\n self.value_proj = nn.Linear(embed_dims, value_proj_size)\r\n self.init_weights()\r\n\r\n def init_weights(self) -> None:\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n device = next(self.parameters()).device\r\n thetas = torch.arange(\r\n self.num_heads, dtype=torch.float32,\r\n device=device) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n # for i in range(self.num_points):\r\n # grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n @no_type_check\r\n def forward(self,\r\n query: torch.Tensor,\r\n key: Optional[torch.Tensor] = None,\r\n value: Optional[torch.Tensor] = None,\r\n identity: Optional[torch.Tensor] = None,\r\n query_pos: Optional[torch.Tensor] = None,\r\n key_padding_mask: Optional[torch.Tensor] = None,\r\n reference_points: Optional[torch.Tensor] = None,\r\n spatial_shapes: Optional[torch.Tensor] = None,\r\n level_start_index: Optional[torch.Tensor] = None,\r\n **kwargs) -> torch.Tensor:\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (torch.Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (torch.Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (torch.Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (torch.Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (torch.Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_padding_mask (torch.Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n reference_points (torch.Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n spatial_shapes (torch.Tensor): Spatial shape of features in\r\n different levels. 
With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (torch.Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n torch.Tensor: forwarded results with shape\r\n [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, None, :, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n * reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if ((IS_CUDA_AVAILABLE and value.is_cuda)\r\n or (IS_MLU_AVAILABLE and value.is_mlu)):\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n if not self.batch_first:\r\n # (num_query, bs ,embed_dims)\r\n output = output.permute(1, 0, 2)\r\n\r\n return output\r" }, { "identifier": "TPVCrossAttention", "path": "model/encoder/tpvformer/attention/image_cross_attention.py", "snippet": "class TPVCrossAttention(BaseModule):\r\n\r\n def __init__(\r\n self,\r\n embed_dims=256,\r\n num_cams=6,\r\n dropout=0.1, \r\n init_cfg=None,\r\n batch_first=True,\r\n num_heads=16,\r\n num_levels=4,\r\n num_points=[64, 64, 8]):\r\n super().__init__(init_cfg)\r\n\r\n deformable_attn_config_hw = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[2],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_hw = build_attention(deformable_attn_config_hw)\r\n\r\n deformable_attn_config_zh = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n 
dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[1],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_zh = build_attention(deformable_attn_config_zh)\r\n \r\n deformable_attn_config_wz = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[0],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_wz = build_attention(deformable_attn_config_wz)\r\n self.attns = [self.attn_hw, self.attn_zh, self.attn_wz]\r\n\r\n def forward(self,\r\n query,\r\n key,\r\n value,\r\n residual=None,\r\n spatial_shapes=None,\r\n reference_points_cams=None,\r\n tpv_masks=None,\r\n level_start_index=None,\r\n **kwargs):\r\n result = []\r\n\r\n for i in range(3):\r\n out = self.attns[i](\r\n query[i],\r\n key,\r\n value,\r\n residual[i] if residual is not None else None,\r\n spatial_shapes=spatial_shapes,\r\n level_start_index=level_start_index,\r\n reference_points_cams=reference_points_cams[i],\r\n bev_masks=tpv_masks[i])\r\n result.append(out)\r\n\r\n return result\r" }, { "identifier": "CrossViewHybridAttention", "path": "model/encoder/tpvformer/attention/cross_view_hybrid_attention.py", "snippet": "class CrossViewHybridAttention(MultiScaleDeformableAttention):\n\n @no_type_check\n @deprecated_api_warning({'residual': 'identity'},\n cls_name='MultiScaleDeformableAttention')\n def forward(self,\n query: torch.Tensor,\n key: Optional[torch.Tensor] = None,\n value: Optional[torch.Tensor] = None,\n identity: Optional[torch.Tensor] = None,\n query_pos: Optional[torch.Tensor] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n reference_points: Optional[torch.Tensor] = None,\n spatial_shapes: Optional[torch.Tensor] = None,\n level_start_index: Optional[torch.Tensor] = None,\n **kwargs) -> torch.Tensor:\n \"\"\"Forward Function of MultiScaleDeformAttention.\n\n Args:\n query (torch.Tensor): Query of Transformer with shape\n (num_query, bs, embed_dims).\n key (torch.Tensor): The key tensor with shape\n `(num_key, bs, embed_dims)`.\n value (torch.Tensor): The value tensor with shape\n `(num_key, bs, embed_dims)`.\n identity (torch.Tensor): The tensor used for addition, with the\n same shape as `query`. Default None. If None,\n `query` will be used.\n query_pos (torch.Tensor): The positional encoding for `query`.\n Default: None.\n key_padding_mask (torch.Tensor): ByteTensor for `query`, with\n shape [bs, num_key].\n reference_points (torch.Tensor): The normalized reference\n points with shape (bs, num_query, num_levels, 2),\n all elements is range in [0, 1], top-left (0,0),\n bottom-right (1, 1), including padding area.\n or (N, Length_{query}, num_levels, 4), add\n additional two dimensions is (w, h) to\n form reference boxes.\n spatial_shapes (torch.Tensor): Spatial shape of features in\n different levels. 
With shape (num_levels, 2),\n last dimension represents (h, w).\n level_start_index (torch.Tensor): The start index of each level.\n A tensor has shape ``(num_levels, )`` and can be represented\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n\n Returns:\n torch.Tensor: forwarded results with shape\n [num_query, bs, embed_dims].\n \"\"\"\n\n if value is None:\n value = query\n\n if identity is None:\n identity = query\n if query_pos is not None:\n query = query + query_pos\n if not self.batch_first:\n # change to (bs, num_query ,embed_dims)\n query = query.permute(1, 0, 2)\n value = value.permute(1, 0, 2)\n\n bs, num_query, _ = query.shape\n bs, num_value, _ = value.shape\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\n\n value = self.value_proj(value)\n if key_padding_mask is not None:\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\n value = value.view(bs, num_value, self.num_heads, -1)\n sampling_offsets = self.sampling_offsets(query).view(\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\n attention_weights = self.attention_weights(query).view(\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\n attention_weights = attention_weights.softmax(-1)\n\n attention_weights = attention_weights.view(bs, num_query,\n self.num_heads,\n self.num_levels,\n self.num_points)\n if reference_points.shape[-1] == 2:\n offset_normalizer = torch.stack(\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\n ### changed here\n sampling_locations = reference_points[:, :, None, :, :, :] \\\n + sampling_offsets \\\n / offset_normalizer[None, None, None, :, None, :]\n elif reference_points.shape[-1] == 4:\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\n + sampling_offsets / self.num_points \\\n * reference_points[:, :, None, :, None, 2:] \\\n * 0.5\n else:\n raise ValueError(\n f'Last dim of reference_points must be'\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\n if ((IS_CUDA_AVAILABLE and value.is_cuda)\n or (IS_MLU_AVAILABLE and value.is_mlu)):\n output = MultiScaleDeformableAttnFunction.apply(\n value, spatial_shapes, level_start_index, sampling_locations,\n attention_weights, self.im2col_step)\n else:\n output = multi_scale_deformable_attn_pytorch(\n value, spatial_shapes, sampling_locations, attention_weights)\n\n output = self.output_proj(output)\n\n if not self.batch_first:\n # (num_query, bs ,embed_dims)\n output = output.permute(1, 0, 2)\n\n return self.dropout(output) + identity" }, { "identifier": "CameraAwareSE", "path": "model/encoder/tpvformer/modules/camera_se_net.py", "snippet": "class CameraAwareSE(nn.Module):\n\n def __init__(\n self,\n in_channels=96,\n mid_channels=192,\n out_channles=96):\n super().__init__()\n self.in_channels = in_channels\n self.mid_channels = mid_channels\n self.out_channels = out_channles\n self._init_layers()\n\n def _init_layers(self):\n self.bn = nn.BatchNorm1d(16)\n self.context_mlp = Mlp(16, self.mid_channels, self.mid_channels)\n self.context_se = SELayer(self.mid_channels) # NOTE: add camera-aware\n self.context_conv = nn.Conv2d(self.mid_channels,\n self.out_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n \n if self.in_channels == self.mid_channels:\n self.reduce_conv = nn.Identity()\n else:\n self.reduce_conv = nn.Sequential(\n nn.Conv2d(self.in_channels,\n self.mid_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(self.mid_channels),\n nn.ReLU(inplace=True))\n \n def init_weight(self):\n # 
nn.init.zeros_(self.context_se.conv_expand.weight)\n # nn.init.constant_(self.context_se.conv_expand.bias, 10.0)\n nn.init.zeros_(self.context_mlp.fc2.weight)\n nn.init.constant_(self.context_mlp.fc2.bias, 10.0)\n\n def forward(self, ms_img_feats, metas):\n intrins, sensor2ego = [], []\n for meta in metas:\n intrins.append(meta['intrinsic'])\n sensor2ego.append(meta['cam2ego'])\n intrins = np.asarray(intrins)\n intrins = ms_img_feats[0].new_tensor(intrins) # bs, N, 4, 4\n sensor2ego = np.asarray(sensor2ego)\n sensor2ego = ms_img_feats[0].new_tensor(sensor2ego)[..., :3, :]\n\n batch_size = intrins.shape[0]\n num_cams = intrins.shape[1]\n mlp_input = torch.cat(\n [\n torch.stack(\n [\n intrins[..., 0, 0],\n intrins[..., 1, 1],\n intrins[..., 0, 2],\n intrins[..., 1, 2],\n ],\n dim=-1,\n ),\n sensor2ego.view(batch_size, num_cams, -1),\n ],\n -1,\n ) # bs, N, 16\n mlp_input = self.bn(mlp_input.reshape(-1, mlp_input.shape[-1]))\n context_se = self.context_mlp(mlp_input)[..., None, None] # bs*N, c, 1, 1\n context_se = torch.sigmoid(context_se)\n\n outputs = []\n for i_scale, img_feats in enumerate(ms_img_feats):\n img_feats = self.reduce_conv(img_feats.flatten(0, 1)) # bs*N, c, h, w\n img_feats = self.context_se(img_feats, context_se)\n img_feats = self.context_conv(img_feats)\n outputs.append(img_feats.unflatten(0, (batch_size, num_cams)))\n\n return outputs" } ]
from mmseg.registry import MODELS
from mmcv.cnn.bricks.transformer import build_positional_encoding, build_transformer_layer
from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention
from mmengine.model import ModuleList
from torch.nn.init import normal_
from mmengine.logging import MMLogger
from ..base_encoder import BaseEncoder
from ..bevformer.utils import point_sampling
from .utils import get_cross_view_ref_points
from ..bevformer.mappings import GridMeterMapping
from ..bevformer.attention import BEVCrossAttention, BEVDeformableAttention
from .attention import TPVCrossAttention, CrossViewHybridAttention
from .modules import CameraAwareSE
import torch.nn as nn, torch, copy
11,884
self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False) cross_view_ref_points = get_cross_view_ref_points(size_h, 
size_w, size_d, num_points_self) self.register_buffer('cross_view_ref_points', cross_view_ref_points, False) # hw_grid_normed = hw_grid[..., [0, 1]].clone() # hw_grid_normed[..., 0] = hw_grid_normed[..., 0] / (size_h - 1) # hw_grid_normed[..., 1] = hw_grid_normed[..., 1] / (size_w - 1) # zh_grid_normed = zh_grid[..., [2, 0]].clone() # zh_grid_normed[..., 0] = zh_grid_normed[..., 0] / (size_d - 1) # zh_grid_normed[..., 1] = zh_grid_normed[..., 1] / (size_h - 1) # wz_grid_normed = wz_grid[..., [1, 2]].clone() # wz_grid_normed[..., 0] = wz_grid_normed[..., 0] / (size_w - 1) # wz_grid_normed[..., 1] = wz_grid_normed[..., 1] / (size_d - 1) # self.register_buffer('ref_2d_hw', hw_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_zh', zh_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_wz', wz_grid_normed, False) # H, W, 2 def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules():
logger = MMLogger.get_instance('selfocc') @MODELS.register_module() class TPVFormerEncoder(BaseEncoder): def __init__( self, mapping_args: dict, # bev_inner=128, # bev_outer=32, # range_inner=51.2, # range_outer=51.2, # nonlinear_mode='linear_upscale', # z_inner=20, # z_outer=10, # z_ranges=[-5.0, 3.0, 11.0], embed_dims=128, num_cams=6, num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 
3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False) cross_view_ref_points = get_cross_view_ref_points(size_h, size_w, size_d, num_points_self) self.register_buffer('cross_view_ref_points', cross_view_ref_points, False) # hw_grid_normed = hw_grid[..., [0, 1]].clone() # hw_grid_normed[..., 0] = hw_grid_normed[..., 0] / (size_h - 1) # hw_grid_normed[..., 1] = hw_grid_normed[..., 1] / (size_w - 1) # zh_grid_normed = zh_grid[..., [2, 0]].clone() # zh_grid_normed[..., 0] = zh_grid_normed[..., 0] / (size_d - 1) # zh_grid_normed[..., 1] = zh_grid_normed[..., 1] / (size_h - 1) # wz_grid_normed = wz_grid[..., [1, 2]].clone() # wz_grid_normed[..., 0] = wz_grid_normed[..., 0] / (size_w - 1) # wz_grid_normed[..., 1] = wz_grid_normed[..., 1] / (size_d - 1) # self.register_buffer('ref_2d_hw', hw_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_zh', zh_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_wz', wz_grid_normed, False) # H, W, 2 def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules():
if isinstance(m, BEVCrossAttention) or \
4
2023-11-20 12:49:14+00:00
16k
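Both deformable-attention snippets above build sampling_locations by broadcasting normalized reference points against learned per-head sampling offsets, dividing the offsets by each level's (w, h) so they are expressed in feature-map pixels. The following is a minimal shape-only sketch of that broadcasting, following the cross-view variant (the line marked "### changed here"); every size, the assumed 5-D reference-point shape, and the example spatial_shapes are illustrative assumptions, not values taken from the code above.

# Shape-only sketch (assumed sizes) of the 2-D branch of the sampling-location
# formula from the deformable-attention snippets above, cross-view variant.
import torch

bs, num_query, num_heads, num_levels, num_points = 2, 5, 8, 4, 4

# per-level feature-map sizes as (h, w); values are illustrative
spatial_shapes = torch.tensor([[64, 96], [32, 48], [16, 24], [8, 12]])

# normalized reference points in [0, 1]; the extra per-point dim mirrors the
# "### changed here" indexing reference_points[:, :, None, :, :, :]
reference_points = torch.rand(bs, num_query, num_levels, num_points, 2)
sampling_offsets = torch.randn(bs, num_query, num_heads, num_levels, num_points, 2)

# (w, h) per level, so x-offsets are scaled by width and y-offsets by height
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)

sampling_locations = reference_points[:, :, None, :, :, :] \
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]

print(sampling_locations.shape)  # torch.Size([2, 5, 8, 4, 4, 2])

Dividing by the per-level (w, h) keeps the learned offsets in pixel units while the resulting sampling locations stay in the normalized [0, 1] space used by the reference points.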
MobileTeleSystems/CoolGraph
cool_graph/runners.py
[ { "identifier": "RawDataProcessor", "path": "cool_graph/data/data_processor.py", "snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of features in groups in nodes.\n mon_nodes_path (str): path to nodes\n mon_edges_path (str): path to edges\n mon_labels_path (str): path to labels\n edge_index_cols (List[str]): columns of edge index in dataset\n label_index_col (str): columns of label index in dataset\n label_mask_col (str): mask of label columns\n read_edge_attr (bool): is set True - read edge features. Default to True.\n group_mask_col (str): Mask for group in data. Default to None.\n features_edges_names (List[str]): List of features on edge. Default to None.\n label_cols (List[str]): List of label columns. Default to None.\n target_names (List[str]): List of target names. Default to None.\n \"\"\"\n\n @staticmethod\n def _check_cols_in_parquet(columns: List[str], path: str) -> bool:\n \"\"\"Cheking colomns in parquet files.\n\n Args:\n columns (List[str]): columns of dataset\n path (str): path to dataset\n\n Raises:\n ValueError: if there is no any files with parquet extension\n ValueError: if there is no path with parquet extension\n\n Returns:\n bool: True if columns and path are right\n \"\"\"\n if columns:\n set_cols = set(columns if type(columns) == list else [columns])\n try:\n parquet_file = [path] if path.endswith(\".parquet\") else []\n parquet_file = (\n parquet_file\n + glob.glob(os.path.join(path, \"*.parquet\"), recursive=True)\n + glob.glob(os.path.join(path, \"**/*.parquet\"), recursive=True)\n )\n parquet_file = parquet_file[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Couldn't find any files with parquet extension in {path}\\n\n Original exception: \\n\n {str(ex)}\n \"\"\"\n )\n pqt_cols = set(pq.read_schema(parquet_file).names)\n if not set_cols.issubset(pqt_cols):\n diff = set_cols - pqt_cols\n raise ValueError(\n f\"\"\"\n \"{'\", \"'.join(diff)}\" were not found in {path}\n \"\"\"\n )\n return True\n\n def __init__(\n self,\n groups_names: Dict[int, str],\n group_names_node_features: Dict[str, List[str]],\n mon_nodes_path: str,\n mon_edges_path: str,\n mon_labels_path: str,\n edge_index_cols: List[str],\n label_index_col: str,\n label_mask_col: Optional[str] = None,\n read_edge_attr: bool = True,\n group_mask_col: Optional[str] = None,\n features_edges_names: Optional[List[str]] = None,\n label_cols: Optional[List[str]] = None,\n target_names: Optional[List[str]] = None,\n ) -> None:\n self._check_cols_in_parquet(group_mask_col, mon_nodes_path)\n self._check_cols_in_parquet(label_cols, mon_labels_path)\n self._check_cols_in_parquet([label_mask_col], mon_labels_path)\n self._check_cols_in_parquet([label_index_col], mon_labels_path)\n\n for key, val in group_names_node_features.items():\n try:\n self._check_cols_in_parquet(val, mon_nodes_path)\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n {str(ex)} for group {key} aka {groups_names[key]}\n \"\"\"\n )\n\n df_node_feats = pq.read_table(mon_nodes_path).to_pandas()\n df_labels = pq.read_table(mon_labels_path, columns=label_cols).to_pandas()\n df_edge_index = pq.read_table(\n mon_edges_path, columns=edge_index_cols\n ).to_pandas()\n\n # Nodes\n node_features = torch.FloatTensor(df_node_feats.values)\n group_mask = torch.IntTensor(df_node_feats[group_mask_col].values)\n node_features_names_fixed = df_node_feats.columns.tolist()\n\n # Labels\n 
df_labels.set_index(label_index_col, inplace=True)\n df_labels.sort_index(inplace=True)\n df_labels.reset_index(inplace=True)\n targets = {t: torch.LongTensor(df_labels[t].values) for t in target_names}\n label_mask = torch.BoolTensor(df_labels[label_mask_col].values)\n index = torch.LongTensor(df_labels[label_index_col].values)\n\n try:\n df_node_feats.shape[0] == df_labels.shape[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Length of features must be equal to the length of labels.\n \"\"\"\n )\n\n # Edges\n edge_index = torch.LongTensor(df_edge_index.values).T\n\n # Nodes\n self.node_features = node_features\n self.group_mask = group_mask\n self.targets = targets\n self.label_mask = label_mask\n self.index = index\n self.edge_index = edge_index\n\n # Edge features\n if read_edge_attr:\n df_edge_feats = pq.read_table(\n mon_edges_path, columns=features_edges_names\n ).to_pandas()\n\n self.edge_features = torch.FloatTensor(df_edge_feats.values)\n self.edge_features_names = df_edge_feats.columns.tolist()\n else:\n self.edge_features = None\n self.edge_features_names = None\n\n self.read_edge_attr = read_edge_attr\n\n # Mappings\n inverse = {v: k for k, v in groups_names.items()}\n self.group_indices_node_findex = {\n inverse[key]: [node_features_names_fixed.index(f) for f in value]\n for key, value in group_names_node_features.items()\n }\n self.groups_names = groups_names\n\n def sample_data(\n self, num_neighbors: int, batch_size: int, seed: int = 0\n ) -> Dict[str, List[torch.utils.data.DataLoader]]:\n \"\"\"Samling data.\n\n Args:\n num_neighbors (int): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n seed (int, optional): Number of seed of samples. Defaults to 0.\n\n Returns:\n Dict[str, List[torch.utils.data.DataLoader]]: Sampled data.\n \"\"\"\n\n return create_loaders(\n self.node_features,\n self.edge_features,\n self.edge_index,\n self.read_edge_attr,\n num_neighbors,\n batch_size,\n self.group_mask,\n self.group_indices_node_findex,\n self.groups_names,\n self.label_mask,\n self.index,\n targets=self.targets,\n )" }, { "identifier": "get_auto_batch_size", "path": "cool_graph/data/batch.py", "snippet": "def get_auto_batch_size(\n groups_num_features: List[int],\n conv_type: Optional[Literal[\"NNConv\", \"GraphConv\"]] = None,\n conv1_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv2_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv3_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n n_hops: Optional[int] = None,\n lin_prep_size_common: Optional[int] = None,\n lin_prep_sizes: Optional[List[int]] = None,\n edge_attr_repr_sizes: Optional[List[int]] = None,\n num_edge_features: Optional[int] = None,\n device: str = \"cuda:0\",\n num_neighbors: Optional[List[int]] = None,\n) -> int:\n \"\"\"\n Аutomatic batch size calculation.\n Depending on model size and free GPU memory.\n\n Args:\n groups_num_features (List[int]): Number of feats in groups on nodes.\n conv_type (Literal[NNConv, GraphConv]): Model type\n conv1_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 1. Defaults to None.\n conv2_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 2. Defaults to None.\n conv3_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 3. 
Defaults to None.\n n_hops (int): Hop with neighbors. Defaults to None.\n lin_prep_size_common (int): Size of linear layer (in). Defaults to None.\n lin_prep_sizes (int): Size of linear layer (out). Defaults to None.\n edge_attr_repr_sizes (List[int]): Size of layer of edges attributes. Defaults to None.\n num_edge_features (int): Number of feats on edges. Defaults to None.\n device (str): The current GPU memory usage. Defaults to \"cuda:0\".\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration. Defaults to None.\n\n Returns:\n batch_size (int): Numbers of samples per batch to load.\n \"\"\"\n if lin_prep_sizes is None:\n lin_prep_sizes = []\n if device is None:\n device = \"cuda:0\"\n\n hop1_size = sum(conv1_aggrs.values())\n hop2_size = sum(conv2_aggrs.values()) if n_hops >= 2 else 0\n hop3_size = sum(conv3_aggrs.values()) if n_hops == 3 else 0\n\n max_size_node = max(\n *groups_num_features,\n lin_prep_size_common,\n *lin_prep_sizes,\n hop1_size,\n hop2_size,\n hop3_size,\n )\n\n max_size_edge = 0\n if conv_type == \"NNConv\":\n max_size_edge = max(\n *edge_attr_repr_sizes,\n num_edge_features,\n )\n\n max_size = max_size_node + max_size_edge * 1.5\n\n try:\n all([n != -1 for n in num_neighbors])\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Found -1, Need to know max neighbors per hop.\n \"\"\"\n )\n m_neighbors = np.prod(num_neighbors)\n\n free_memory = torch.cuda.mem_get_info(device=device)[0] / (1024**3) # GB\n\n floats_per_node_ = 320000\n batch_size_ = 250\n memory_reserved_max_ = 3.8\n\n batch_size = (\n 0.5\n * batch_size_\n * floats_per_node_\n / (m_neighbors * max_size)\n * (free_memory / memory_reserved_max_)\n )\n\n if conv_type == \"NNConv\":\n batch_size /= edge_attr_repr_sizes[-1] * 4\n\n batch_size = int(batch_size)\n\n return batch_size" }, { "identifier": "create_loaders", "path": "cool_graph/data/loaders.py", "snippet": "def create_loaders(\n data: Data = None,\n node_features: torch.FloatTensor = None,\n edge_features: torch.FloatTensor = None,\n edge_index: torch.LongTensor = None,\n read_edge_attr: bool = None,\n num_neighbors: List[int] = None,\n batch_size: int = None,\n group_mask: torch.LongTensor = None,\n groups_features: Dict[int, List[int]] = None,\n groups_names: Dict[int, str] = None,\n label_mask: torch.BoolTensor = None,\n index: torch.LongTensor = None,\n targets: Dict[str, torch.Tensor] = None,\n input_nodes: Optional[List] = None,\n node_feature_indices: Optional[List] = None,\n unique_groups: Optional[int] = None,\n) -> List[torch.utils.data.DataLoader]:\n \"\"\"\n Creating list loaders.\n\n Args:\n node_features (torch.FloatTensor): features on nodes on FloatTensor\n edge_features (torch.FloatTensor): features on edge on FloatTensor\n edge_index (torch.LongTensor): edge indices\n read_edge_attr (bool): if set True - read edge features.\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n group_mask (torch.LongTensor): Mask for groups in nodes.\n groups_features (Dict[int, List[int]]): Features in groups in nodes.\n groups_names (Dict[int, str]): Name of featutes in groups in nodes.\n label_mask (torch.BoolTensor): Mask for label.\n index (torch.LongTensor): index\n targets (Dict[str, torch.Tensor]): Labels.\n\n Returns:\n List[torch.utils.data.DataLoader]: Created DataLoader object. 
https://pytorch.org/docs/stable/data.html\n \"\"\"\n unique_groups = np.unique(group_mask)\n try:\n set(unique_groups).issubset(set(groups_features.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Group mask values should be a subset of feature groups keys\"\"\"\n )\n\n try:\n set(groups_features).issubset(set(groups_names.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Feature groups keys should be a subset of feature_groups_names\"\"\"\n )\n if data is None:\n data = Data(\n x=node_features,\n edge_index=edge_index,\n edge_attr=edge_features if read_edge_attr else None,\n group_mask=group_mask,\n label_mask=label_mask,\n index=index,\n **targets,\n )\n input_nodes = torch.nonzero(label_mask)[:, 0]\n\n loader = NeighborLoader(\n data,\n num_neighbors=num_neighbors,\n batch_size=batch_size,\n shuffle=True,\n input_nodes=input_nodes,\n )\n\n list_loader = []\n for sampled_data in tqdm(loader, desc=\"Sample data\"):\n sampled_data.label_mask[sampled_data.batch_size :] = False\n\n for group in unique_groups:\n name = groups_names[group]\n mask = sampled_data.group_mask == group\n features = groups_features[group]\n setattr(sampled_data, name, sampled_data.x[mask][:, features])\n\n del sampled_data.x\n\n list_loader.append(sampled_data)\n\n return list_loader" }, { "identifier": "setup_mlflow_from_config", "path": "cool_graph/logging/mlflow_logging.py", "snippet": "def setup_mlflow_from_config(config: Dict) -> None:\n \"\"\"\n Setup mlflow using logging.mlflow section of a config\n \"\"\"\n\n if config.get(\"MLFLOW_DISABLE_INSECURE_REQUEST_WARNING\", False):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n for key, value in config.items():\n os.environ[key] = str(value)\n\n mlflow.set_tracking_uri(config.get(\"MLFLOW_TRACKING_URI\"))" }, { "identifier": "model_params_to_trial_params", "path": "cool_graph/parameter_search/example_objective.py", "snippet": "def model_params_to_trial_params(\n **model_params: Dict[str, Union[Literal[str], int, float, List, Dict]]\n) -> Dict[str, Union[Literal[str], int, float, List, Dict]]:\n \"\"\"\n Convert readable model_params to trial_params\n for example to run study.enqueue_trial(trial_params)\n \"\"\"\n trial = {}\n trial[\"activation\"] = model_params[\"activation\"]\n trial[\"lin_prep_len\"] = model_params[\"lin_prep_len\"]\n trial[\"lin_prep_dropout_rate\"] = model_params[\"lin_prep_dropout_rate\"]\n trial[\"lin_prep_weight_norm_flag\"] = model_params[\"lin_prep_weight_norm_flag\"]\n last_size = model_params[\"lin_prep_size_common\"]\n trial[\"lin_prep_size_common\"] = last_size\n for i in range(model_params[\"lin_prep_len\"]):\n trial[f\"lin_prep_size{i}_fraction\"] = np.clip(\n model_params[\"lin_prep_sizes\"][i] / last_size, 0.2, 1.0\n )\n last_size = model_params[\"lin_prep_sizes\"][i]\n\n trial[\"conv1_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"mean\"] / last_size, 0.1, 1.0\n )\n trial[\"conv1_aggrs_max_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"max\"] / last_size, 0.05, 0.7\n )\n trial[\"conv1_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"add\"] / last_size, 0.05, 0.7\n )\n\n trial[\"conv1_dropout_rate\"] = model_params[\"conv1_dropout_rate\"]\n\n if model_params[\"n_hops\"] == 2:\n last_size = sum(model_params[\"conv1_aggrs\"].values())\n\n trial[\"conv2_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"mean\"] / last_size, 0.1, 0.7\n )\n trial[\"conv2_aggrs_max_fraction\"] = np.clip(\n 
model_params[\"conv2_aggrs\"][\"max\"] / last_size, 0.05, 0.5\n )\n trial[\"conv2_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"add\"] / last_size, 0.05, 0.5\n )\n\n trial[\"conv2_dropout_rate\"] = model_params[\"conv2_dropout_rate\"]\n\n if model_params[\"conv_type\"] == \"GraphConv\":\n trial[\"graph_conv_weight_norm_flag\"] = model_params[\n \"graph_conv_weight_norm_flag\"\n ]\n\n if model_params[\"conv_type\"] == \"NNConv\":\n trial[\"edge_attr_repr_len\"] = model_params[\"edge_attr_repr_len\"]\n for i in range(model_params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n trial[f\"edge_attr_repr_size{i}\"] = model_params[\"edge_attr_repr_sizes\"][\n i\n ]\n\n else:\n trial[f\"edge_attr_repr_size{i}_fraction\"] = np.clip(\n model_params[\"edge_attr_repr_sizes\"][i]\n / model_params[\"edge_attr_repr_sizes\"][i - 1],\n 0.2,\n 1.0,\n )\n\n trial[\"edge_attr_repr_size_last\"] = model_params[\"edge_attr_repr_sizes\"][-1]\n\n trial[\"edge_attr_repr_dropout_rate\"] = model_params[\n \"edge_attr_repr_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_last_dropout_rate_zero\"] = (\n model_params[\"edge_attr_repr_last_dropout_rate\"] == 0\n )\n if not trial[\"edge_attr_repr_last_dropout_rate_zero\"]:\n trial[\"edge_attr_repr_last_dropout_rate\"] = model_params[\n \"edge_attr_repr_last_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_weight_norm_flag\"] = model_params[\n \"edge_attr_repr_weight_norm_flag\"\n ]\n\n return trial" }, { "identifier": "sample_model_params", "path": "cool_graph/parameter_search/example_objective.py", "snippet": "def sample_model_params(trial: optuna.Trial, conv_type: str = \"GraphConv\") -> Dict:\n params = {}\n params[\"conv_type\"] = conv_type\n params[\"activation\"] = trial.suggest_categorical(\n \"activation\",\n [\n \"relu\", # 1st place\n \"prelu\", # 2nd place\n \"leakyrelu\",\n \"elu\",\n \"gelu\",\n ],\n )\n # NODE FEATURES PREP params\n params[\"lin_prep_len\"] = trial.suggest_int(\"lin_prep_len\", low=0, high=2)\n params[\"lin_prep_dropout_rate\"] = trial.suggest_uniform(\n \"lin_prep_dropout_rate\", low=0, high=0.5\n )\n params[\"lin_prep_weight_norm_flag\"] = trial.suggest_categorical(\n \"lin_prep_weight_norm_flag\", [False, True]\n )\n\n min_lin_prep_size_common = 32\n max_lin_prep_size_common = 1024\n\n last_size = trial.suggest_int(\n \"lin_prep_size_common\",\n min_lin_prep_size_common,\n max_lin_prep_size_common,\n log=True,\n )\n params[\"lin_prep_size_common\"] = last_size\n params[\"lin_prep_sizes\"] = []\n for i in range(params[\"lin_prep_len\"]):\n fraction = trial.suggest_loguniform(\n f\"lin_prep_size{i}_fraction\", low=0.2, high=1.0\n )\n last_size = max(16, int(np.round(last_size * fraction)))\n params[\"lin_prep_sizes\"].append(last_size)\n params[\"n_hops\"] = 2\n\n # CONV1 params\n\n params[\"conv1_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\"conv1_aggrs_mean_fraction\", low=0.1, high=1.0)\n params[\"conv1_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_max_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_add_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv1_dropout_rate\"] = trial.suggest_uniform(\n \"conv1_dropout_rate\", low=0, high=0.5\n )\n\n # return params\n # CONV2 params\n if params[\"n_hops\"] == 2:\n last_size = sum(params[\"conv1_aggrs\"].values())\n 
params[\"conv2_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_mean_fraction\", low=0.1, high=0.7\n )\n params[\"conv2_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_max_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_add_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv2_dropout_rate\"] = trial.suggest_uniform(\n \"conv2_dropout_rate\", low=0, high=0.5\n )\n if params[\"conv_type\"] == \"GraphConv\":\n params[\"graph_conv_weight_norm_flag\"] = trial.suggest_categorical(\n \"graph_conv_weight_norm_flag\", [False, True]\n )\n\n # EDGE ATTR params\n if params[\"conv_type\"] == \"NNConv\":\n params[\"edge_attr_repr_len\"] = trial.suggest_int(\n \"edge_attr_repr_len\", low=1, high=3\n )\n params[\"edge_attr_repr_sizes\"] = []\n for i in range(params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\n f\"edge_attr_repr_size{i}\", low=4, high=40, log=True\n )\n )\n else:\n fraction = trial.suggest_loguniform(\n f\"edge_attr_repr_size{i}_fraction\", low=0.2, high=1.0\n )\n params[\"edge_attr_repr_sizes\"].append(\n max(4, int(np.round(params[\"edge_attr_repr_sizes\"][-1] * fraction)))\n )\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\"edge_attr_repr_size_last\", low=1, high=5, log=True)\n )\n\n params[\"edge_attr_repr_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_dropout_rate\", low=0, high=0.5\n )\n if trial.suggest_categorical(\n \"edge_attr_repr_last_dropout_rate_zero\", [True, False]\n ):\n params[\"edge_attr_repr_last_dropout_rate\"] = 0.0\n else:\n params[\"edge_attr_repr_last_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_last_dropout_rate\", low=0, high=0.5\n )\n\n params[\"edge_attr_repr_weight_norm_flag\"] = trial.suggest_categorical(\n \"edge_attr_repr_weight_norm_flag\", [False, True]\n )\n\n params[\"edge_attr_repr_last_activation\"] = \"sigmoid\"\n\n return params" }, { "identifier": "Trainer", "path": "cool_graph/train/trainer.py", "snippet": "class Trainer(object):\n def __init__(\n self,\n list_loader_train: List[torch.utils.data.DataLoader],\n list_loader_test: List[torch.utils.data.DataLoader],\n checkpoint_dir: Union[str, pathlib.PosixPath],\n device: str = \"cuda:0\",\n eval_freq: int = 5,\n fill_value: Union[int, float] = -100,\n initial_lr: float = 0.0023,\n weight_decay: float = 0.001,\n loss_name: str = \"CrossEntropyLoss\",\n loss_label_smoothing: bool = False,\n loss_target_weights: Optional[Dict[str, Union[int, float]]] = None,\n loss_group_weights: Optional[List[float]] = None,\n groups_names: Optional[Dict[int, str]] = None,\n groups_names_num_features: Optional[Dict[str, int]] = None,\n num_edge_features: Optional[int] = None,\n main_metric_name: str = \"main_metric\",\n mlflow_experiment_name: Optional[str] = None,\n n_epochs: int = 10,\n scheduler_params: Dict[Literal[\"milestones\", \"gamma\"], int] = {\n \"milestones\": [10, 20, 35, 50, 70, 90, 105],\n \"gamma\": 0.25,\n },\n scheduler_type: str = \"MultiStepLR\",\n target_names: List[str] = [\"y\"],\n target_sizes: Optional[List[int]] = None,\n use_mlflow: bool = False,\n tqdm_disable=False,\n conv_type: Literal[\"NNConv\", \"GraphConv\"] = \"NNConv\",\n metrics: Optional[float] = None,\n log_all_metrics: bool = True,\n 
**model_params,\n ) -> None:\n \"\"\"\n Training model (GraphConv or NNConv).\n Class that training / logging / saving model. Using train_epoch\n and eval_epoch from helpers.py in training loop below.\n\n Args:\n list_loader_train (List[torch.utils.data.DataLoader]): Train list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n list_loader_test (List[torch.utils.data.DataLoader]): Test list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n checkpoint_dir (Union[str, pathlib.PosixPath]): Path for training checkpoints\n device (_type_, optional): The device is an object representing the device on\n which a torch.Tensor is or will be allocated.. Defaults to \"cuda:0\".\n eval_freq (int, optional): Number of epoch group. Defaults to 5.\n fill_value (Union[int, float], optional): If value is None. Defaults to -100.\n initial_lr (float, optional): The learning rate param for Optimization. Defaults to 0.0023.\n weight_decay (float, optional): weight decay (L2 penalty). Defaults to 0.001.\n loss_name (str, optional): This criterion computes the cross entropy loss between\n input logits and target. Defaults to \"CrossEntropyLoss\".\n https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html\n loss_label_smoothing (bool, optional): If set True, use label smoothing. Defaults to False.\n loss_target_weights (Optional[Dict[str, Union[int, float]]], optional): Weights for targets. Defaults to None.\n loss_group_weights (Optional[List[float]], optional): Weights for groups. Defaults to None.\n groups_names (Optional[Dict[int, str]], optional): List with group names in nodes. Defaults to None.\n groups_names_num_features (Optional[Dict[str, int]], optional): Number of feats in groups in nodes. Defaults to None.\n num_edge_features (Optional[int], optional): Number of feats on edges. Defaults to None.\n main_metric_name (str, optional): Main metric for maximaze. Defaults to \"main_metric\".\n mlflow_experiment_name (Optional[str], optional): Name of mlflow experiment. Defaults to None.\n n_epochs (int, optional): Number of epochs. Defaults to 10.\n scheduler_params (Dict, optional): Milestones (list) – List of epoch indices. Must be increasing.\n gamma (float) – Multiplicative factor of learning rate decay.\n Defaults to { \"milestones\": [10, 20, 35, 50, 70, 90, 105], \"gamma\": 0.25, }.\n scheduler_type (str, optional): Decays the learning rate of each parameter group\n by gamma once the number of epoch reaches one of the milestones. Defaults to \"MultiStepLR\".\n target_names (List[str], optional): List of target names. Defaults to [\"y\"].\n target_sizes (Optional[List[int]], optional): Size of list with target. Defaults to None.\n use_mlflow (bool, optional): If set True, use MLFlow. Defaults to False.\n tqdm_disable (bool, optional): Display progress. Defaults to False.\n conv_type (Literal[NNConv, GraphConv], optional): The graph neural network operator. Defaults to \"NNConv\".\n metrics (float, optional): Metrics. Defaults to None.\n log_all_metrics (bool, optional): If set True, logging all metrics. 
Defaults to True.\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n for key, value in locals().items():\n setattr(self, key, value)\n\n self._metrics = {}\n self._main_metric = {}\n if isinstance(metrics, str):\n metrics = [metrics]\n if isinstance(\n metrics,\n (\n list,\n tuple,\n ),\n ):\n metrics = {name: metrics for name in target_names}\n\n for k, names in metrics.items():\n self._metrics[k] = {name: get_metric(name) for name in names}\n self._main_metric[k] = names[0]\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n torch.cuda.empty_cache()\n gc.collect()\n\n if conv_type == \"NNConv\":\n self._model = NNConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n elif conv_type == \"GraphConv\":\n self._model = GraphConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n else:\n raise NotImplementedError(f\"{conv_type} is not implemented\")\n\n self._model.to(device)\n\n self._optimizer = torch.optim.Adam(\n self._model.parameters(),\n lr=initial_lr,\n weight_decay=weight_decay,\n )\n\n self._loss_criteria = getattr(torch.nn, loss_name)(\n reduction=\"none\", label_smoothing=loss_label_smoothing\n )\n self._use_edge_attr = conv_type == \"NNConv\"\n\n self._scheduler = getattr(torch.optim.lr_scheduler, scheduler_type)(\n self._optimizer, **scheduler_params\n )\n\n self._best_loss = {main_metric_name: -np.inf}\n\n self._train_run_lst = []\n self._test_metric_lst = []\n self._train_metric_lst = []\n\n def train(\n self, start_epoch: int = 0, end_epoch: Optional[int] = None\n ) -> Dict[\n Literal[\n \"best_loss\", \"global_calc_time\", \"train_loss\", \"test_metric\", \"train_metric\"\n ],\n float,\n ]:\n \"\"\"\n Training model and logging metrics.\n \"\"\"\n if end_epoch is None:\n end_epoch = self.n_epochs\n\n self.global_start_time = time.time()\n\n if self.use_mlflow:\n mlflow.end_run()\n mlflow.set_experiment(self.mlflow_experiment_name)\n mlflow.start_run()\n mlflow.log_params(\n {\n \"LossCriteria\": self._loss_criteria,\n \"checkpoint_dir\": self.checkpoint_dir,\n **self.model_params,\n }\n )\n\n for epoch in range(start_epoch, end_epoch):\n self.epoch = epoch\n # TRAIN\n train_run = train_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self._optimizer,\n self._use_edge_attr,\n target_weights=self.loss_target_weights,\n loss_criteria=self._loss_criteria,\n group_weights=self.loss_group_weights,\n tqdm_disable=self.tqdm_disable,\n )\n train_run[\"lr\"] = self._optimizer.param_groups[0][\"lr\"]\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_run, \"run_\"), step=epoch\n )\n train_run[\"epoch\"] = epoch\n self._train_run_lst.append(train_run)\n with open(\n os.path.join(self.checkpoint_dir, \"train_running_loss.txt\"), \"a\"\n ) as f:\n json.dump(train_run, f)\n f.write(\"\\n\")\n\n # calc metrics and perform scheduler step\n if (epoch - 0) % self.eval_freq == 0:\n # calc metrics\n # test\n logger.info(\"\\nEpoch {:03d}: \".format(epoch))\n test_metric = eval_epoch(\n self._model,\n self.list_loader_test,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"test\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n fill_value=self.fill_value,\n metrics=self._metrics,\n 
main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(test_metric, \"test_\"), step=epoch\n )\n test_metric[\"epoch\"] = epoch\n self._test_metric_lst.append(test_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"test_metric.txt\"), \"a\"\n ) as f:\n json.dump(test_metric, f)\n f.write(\"\\n\")\n\n # train\n logger.info(\"Epoch {:03d}: \".format(epoch))\n train_metric = eval_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"train\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n metrics=self._metrics,\n main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_metric, \"train_\"), step=epoch\n )\n train_metric[\"epoch\"] = epoch\n self._train_metric_lst.append(train_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"train_metric.txt\"), \"a\"\n ) as f:\n json.dump(train_metric, f)\n f.write(\"\\n\")\n\n # save model\n checkpoint_file = os.path.join(\n self.checkpoint_dir, f\"state_dict_{epoch:0>4d}.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n\n if (\n test_metric[self.main_metric_name]\n > self._best_loss[self.main_metric_name]\n ):\n self._best_loss = test_metric\n self._best_loss[\"epoch\"] = epoch\n checkpoint_file = os.path.join(\n self.checkpoint_dir, \"state_dict_best.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n with open(\n os.path.join(self.checkpoint_dir, \"best_loss.txt\"), \"w\"\n ) as f:\n json.dump(self._best_loss, f, indent=4)\n\n self.mlflow_log_metrics(\n {\n \"best_epoch\": self._best_loss[\"epoch\"],\n f\"best_{self.main_metric_name}\": self._best_loss[\n self.main_metric_name\n ],\n },\n step=epoch,\n )\n\n if self.scheduler_type == \"ReduceLROnPlateau\":\n self._scheduler.step(train_run[\"total_loss\"])\n if (\n self._optimizer.param_groups[0][\"lr\"]\n <= self.scheduler_params[\"min_lr\"]\n ):\n break\n else:\n self._scheduler.step()\n\n self.global_calc_time = time.time() - self.global_start_time\n train_loss = pd.DataFrame(self._train_run_lst)\n test_metric = pd.DataFrame(self._test_metric_lst)\n train_metric = pd.DataFrame(self._train_metric_lst)\n\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(self._best_loss, \"best_\")\n )\n self.mlflow_log_metrics({\"global_calc_time\": self.global_calc_time})\n\n if self.use_mlflow:\n mlflow.end_run()\n torch.cuda.empty_cache()\n\n return {\n \"best_loss\": self._best_loss,\n \"global_calc_time\": self.global_calc_time,\n \"train_loss\": train_loss,\n \"test_metric\": test_metric,\n \"train_metric\": train_metric,\n }\n\n def mlflow_log_metrics(\n self, metrics: Dict[str, Any], step: Optional[int] = None\n ) -> None:\n if self.use_mlflow:\n try:\n mlflow.log_metrics(metrics, step)\n except MlflowException as e:\n save_str_e = traceback.format_exc()\n logger.info(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )\n with open(\n os.path.join(self.checkpoint_dir, \"MlflowExceptions.txt\"), \"a\"\n ) as f:\n f.write(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )" } ]
import os
import pathlib
import hydra
import numpy as np
import optuna
import pandas as pd
import torch
from datetime import datetime
from itertools import product
from pathlib import Path
from typing import Dict, List, Literal, Optional
from hydra import (
    compose,
    core,
    initialize,
    initialize_config_dir,
    initialize_config_module,
)
from omegaconf import DictConfig, OmegaConf
from optuna.trial import TrialState
from sklearn.model_selection import train_test_split
from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader, NeighborSampler
from tqdm import tqdm
from cool_graph.data import RawDataProcessor
from cool_graph.data.batch import get_auto_batch_size
from cool_graph.data.loaders import create_loaders
from cool_graph.logging import setup_mlflow_from_config
from cool_graph.parameter_search import (
    model_params_to_trial_params,
    sample_model_params,
)
from cool_graph.train import Trainer
12,129
>>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. 
""" list_with_params = [] def objective(trial) -> float: self.cfg["model_params"] = sample_model_params( trial, conv_type=self.cfg["model_params"]["conv_type"] ) list_with_params.append(self.cfg["model_params"]) self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get( "mlflow_experiment_name" ), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() output = result["best_loss"]["main_metric"] output = round(output, 3) return output # default params for the 1st trial in Optuna optimization
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict: assert path_base in ("cfg", "cwd") core.global_hydra.GlobalHydra.instance().clear() if os.path.isabs(config): config_path = pathlib.Path(config).parent else: config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent config_name = pathlib.Path(config).name.replace(".yaml", "") initialize_config_dir(str(config_path), version_base=None) cfg = compose(config_name=config_name, overrides=overrides) return cfg class ConfigRunner: r"""Runner for cli mode. Using only in cli. This class allows to load data + split data per batchs + split data per train/val + training. See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ def __init__(self, config: Optional[DictConfig]) -> None: cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.train_paths = cfg["data"]["train"] self.val_paths = cfg["data"]["validation"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Using RawDataProcessor from cool_graph.data for preprocessing data from disk. """ self.train_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.train_paths["nodes_path"], mon_edges_path=self.train_paths["edges_path"], mon_labels_path=self.train_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) self.val_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.val_paths["nodes_path"], mon_edges_path=self.val_paths["edges_path"], mon_labels_path=self.val_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) def sample_data( self, seed=0 ) -> Dict[Literal["train", "validation"], List[torch.utils.data.DataLoader]]: """ Sampling data in batches. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size train_loaders = self.train_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) val_loaders = self.val_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) return {"train": train_loaders, "validation": val_loaders} def run(self, seed: int = 0) -> Dict[str, float]: """ Train model for train_samples and val_sampler. Args: seed (int): seed for training. Default to 0. Returns: result (dict): Result of training for each 5 epochs with metrics from config. """ if not (hasattr(self, "train_sampler") and hasattr(self, "val_sampler")): self.init_loaders() sampled = self.sample_data(seed=seed) train_loaders = sampled["train"] val_loaders = sampled["validation"] self.trainer = Trainer( train_loaders, val_loaders, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.cfg["training"]["loss"].get("target_weights"), loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.cfg["data"]["groups_names"], mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.cfg["training"]["targets"], use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, **self.cfg["model_params"], groups_names_num_features={ k: len(v) for k, v in self.group_names_node_features.items() }, num_edge_features=len(self.cfg["data"].get("features_edges", [])), metrics=self.metrics, ) result = self.trainer.train() return result class BaseRunner: def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ) -> None: """ Main class for Basic runner and Runner with Optuna. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. 
https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (list): Indices for train data. Defaults to None. test_idx (list): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. """ if config is None: if config_path is None: if use_edge_attr: config_path = "./config/in_memory_data2.yaml" else: config_path = "./config/in_memory_data.yaml" config_path = os.path.join(os.path.dirname(__file__), config_path) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) cfg = OmegaConf.to_container(config, resolve=True) self.data = data self.cfg = cfg self.test_size = test_size self.train_size = train_size self.seed = seed self.train_idx = train_idx self.test_idx = test_idx self.use_edge_attr = use_edge_attr if use_edge_attr and data.edge_attr is None: raise BaseException( "data does not contain edge_attr, please set use_edge_attr=False" ) self.target_names = cfg["training"]["targets"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.batch_size = cfg["training"]["batch_size"] self.num_neighbors = cfg["training"]["num_neighbors"] self.metrics = cfg["metrics"] self.data.group_mask = torch.zeros(len(data.x), dtype=torch.int8) self.data.label_mask = torch.ones(len(data.x), dtype=torch.bool) self.groups_names = {0: "x"} self.groups_names_num_features = {"x": data.x.shape[1]} if len(data.y.shape) == 2: self.target_sizes = [] self.target_names = [] self.target_weights = {} for i in range(data.y.shape[1]): y_sub = data.y[:, i] setattr(data, f"y{i}", y_sub) self.target_sizes.append(len(y_sub.unique())) self.target_names.append(f"y{i}") self.target_weights[f"y{i}"] = 1 else: self.target_names = ["y"] self.target_sizes = [len(data.y.unique())] self.target_weights = {"y": 1} if use_edge_attr: self.num_edge_features = data.edge_attr.shape[1] else: self.num_edge_features = 0 self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) for k, v in kwargs.items(): setattr(self, k, v) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Sampling data into batches and sampling data with NeighborLoader into list loaders. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [ self.groups_names_num_features[self.groups_names[i]] for i in range(len(self.groups_names)) ], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=self.num_edge_features, device=self.cfg["training"]["device"], num_neighbors=self.num_neighbors, ) else: self._batch_size = self.batch_size if (self.train_idx is None) or (self.test_idx is None): train_idx, test_idx = train_test_split( torch.nonzero(self.data.label_mask)[:, 0], train_size=self.train_size, test_size=self.test_size, random_state=self.seed, shuffle=True, ) self.train_idx = train_idx self.test_idx = test_idx def sample_date_prerpoc(sampled_data: Data) -> Data: sampled_data.label_mask[sampled_data.batch_size :] = False for group, name in self.groups_names.items(): x = getattr(sampled_data, name)[sampled_data.group_mask == group] setattr(sampled_data, name, x) return sampled_data loader_train = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.train_idx, ) list_loader_train = [] for sampled_data in tqdm(loader_train, desc="Sample data"): list_loader_train.append(sample_date_prerpoc(sampled_data)) self.train_loader = list_loader_train loader_test = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.test_idx, ) list_loader_test = [] for sampled_data in tqdm(loader_test, desc="Sample data"): list_loader_test.append(sample_date_prerpoc(sampled_data)) self.test_loader = list_loader_test class Runner(BaseRunner): """ Runner for notebook launch. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (int): Indices for train data. Defaults to None. test_idx (int): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. 
    Examples
    --------
    >>> from cool_graph.runners import Runner
    >>> from torch_geometric import datasets
    >>> # loading amazon dataset
    >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data
    >>> runner = Runner(data)
    >>> result = runner.run()
    >>> result["best_loss"]
    {'accuracy': 0.916,
     'cross_entropy': 0.286,
     'f1_micro': 0.916,
     'calc_time': 0.004,
     'main_metric': 0.916,
     'epoch': 10}

    You can also override params in Runner:

    runner = Runner(data, metrics=['accuracy'], batch_size='auto',
                    train_size=0.7, test_size=0.3,
                    overrides=['training.n_epochs=1'], config_path=path/to/config)
    result = runner.run()
    """

    def __init__(
        self,
        data: Data,
        config: Optional[DictConfig] = None,
        config_path: Optional[str] = None,
        overrides: Optional[List] = None,
        train_size: Optional[int] = None,
        test_size: Optional[int] = None,
        seed: Optional[int] = None,
        train_idx: Optional[List[int]] = None,
        test_idx: Optional[List[int]] = None,
        use_edge_attr: bool = False,
        **kwargs,
    ):
        super().__init__(
            data,
            config,
            config_path,
            overrides,
            train_size,
            test_size,
            seed,
            train_idx,
            test_idx,
            use_edge_attr,
            **kwargs,
        )

    def run(self) -> Dict[str, float]:
        """
        Train the model with the params from the in_memory_data/in_memory_data2 config.
        See the configs in ./config to see exactly what is used for
        logging/model_params/training/metrics.
        You can use the default params, but you can also change them.
        Steps for changing configs:
        - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself)
        """
        if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")):
            self.init_loaders()

        self.trainer = Trainer(
            self.train_loader,
            self.test_loader,
            self.chkpt_dir,
            device=self.cfg["training"]["device"],
            eval_freq=self.cfg["training"]["eval_freq"],
            fill_value=self.cfg["training"]["loss"].get("fill_value"),
            initial_lr=self.cfg["training"].get("initial_lr", 0.01),
            weight_decay=self.cfg["training"].get("weight_decay", 0.0),
            loss_name=self.cfg["training"]["loss"]["name"],
            loss_label_smoothing=self.cfg["training"]["loss"].get(
                "label_smoothing", False
            ),
            loss_target_weights=self.target_weights,
            loss_group_weights=self.cfg["training"]["loss"].get("group_weights"),
            groups_names=self.groups_names,
            mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"),
            n_epochs=self.cfg["training"].get("n_epochs"),
            scheduler_params=self.cfg["training"].get("scheduler_params", {}),
            scheduler_type=self.cfg["training"].get("scheduler_type"),
            target_names=self.target_names,
            use_mlflow=self.cfg["logging"].get("use_mlflow", False),
            tqdm_disable=False,
            target_sizes=self.target_sizes,
            **self.cfg["model_params"],
            groups_names_num_features=self.groups_names_num_features,
            num_edge_features=self.num_edge_features,
            metrics=self.metrics,
            log_all_metrics=False,
        )

        result = self.trainer.train()
        return result


class HypeRunner(BaseRunner):
    """
    Runner for model optimization with Optuna.
    https://optuna.readthedocs.io/en/stable/reference/index.html
    The 1st trial uses the default config params (hyper_params).
    You can also enqueue your own trials via the enqueue_trial argument of the
    optimize_run method; subsequent trials sample model params randomly.
    If enqueue_trial is None, random optimization starts right after the 1st default trial.
    Args:
        data (Data): Loaded dataset.
        config (DictConfig): Config with params (model_params, logging, training, metrics). Default to None.
        config_path (str): Path with config structure (can be loaded with cli get_config). Default to None.
        overrides (list): Own params in list. Default to None.
        train_size (int): Own train size. Default to None.
test (int): Own test size. Default to None. seed (int): The desired seed. Default to None. train_idx (list): List of train indices. test_idx (list): List of test indices. Examples -------- >>> from cool_graph.runners import HypeRunner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. 
""" list_with_params = [] def objective(trial) -> float: self.cfg["model_params"] = sample_model_params( trial, conv_type=self.cfg["model_params"]["conv_type"] ) list_with_params.append(self.cfg["model_params"]) self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get( "mlflow_experiment_name" ), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() output = result["best_loss"]["main_metric"] output = round(output, 3) return output # default params for the 1st trial in Optuna optimization
trial_params = model_params_to_trial_params(**self.cfg["model_params"])
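The gold `next_line` above flattens the config's default `model_params` into an Optuna-compatible dict so it can be enqueued as the very first trial, matching the trailing comment in the code. Below is a minimal, self-contained sketch of that enqueue-default-then-optimize pattern with a toy objective standing in for `Trainer.train()`; apart from the two parameter names (taken from the HypeRunner docstring) and the standard Optuna calls, everything here is an illustrative assumption.

```python
import optuna


def toy_objective(trial: optuna.Trial) -> float:
    # Stand-in for: sample_model_params(trial, ...) -> Trainer(...).train() -> main_metric
    size = trial.suggest_categorical("lin_prep_size_common", [128, 256, 512])
    dropout = trial.suggest_float("lin_prep_dropout_rate", 0.0, 0.5)
    # Pretend wider layers with moderate dropout give a better main metric.
    return round(size / 512 - abs(dropout - 0.2), 3)


study = optuna.create_study(direction="maximize")  # the objective returns the metric to maximize
# 1st trial: the config's default params (cf. model_params_to_trial_params above)
study.enqueue_trial({"lin_prep_size_common": 512, "lin_prep_dropout_rate": 0.2})
study.optimize(toy_objective, n_trials=5)

print(study.best_trial.params)
trials_dataset = study.trials_dataframe()  # a DataFrame like the one optimize_run returns
```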
4
2023-11-22 09:44:16+00:00
16k
HeliosZhao/Animate124
nerf/network_grid_tcnn.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.max_level = None\n self.dmtet = opt.dmtet\n self.cuda_ray = opt.cuda_ray\n self.taichi_ray = opt.taichi_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor(\n [-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n self.glctx = None\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n ## NOTE TODO the cuda ray sampling for DNeRF is different, make sure to change\n # density grid\n density_grid = torch.zeros(\n [self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(\n self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n\n # load dmtet vertices\n if self.opt.dmtet:\n self.dmtet = DMTetGeometry(opt.tet_grid_size, opt.tet_mlp, opt).to(opt.device)\n if self.opt.h <= 2048 and self.opt.w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n if self.taichi_ray:\n from einops import rearrange\n from taichi_modules import RayMarcherTaichi\n from taichi_modules import VolumeRendererTaichi\n from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi\n from taichi_modules import raymarching_test as raymarching_test_taichi\n from taichi_modules import composite_test as composite_test_fw\n from taichi_modules import packbits as packbits_taichi\n self.rearrange = rearrange\n self.packbits_taichi = packbits_taichi\n self.ray_aabb_intersector = RayAABBIntersectorTaichi\n self.raymarching_test_taichi = raymarching_test_taichi\n self.composite_test_fw = composite_test_fw\n self.ray_marching = RayMarcherTaichi(\n batch_size=4096) # TODO: hard encoded batch size\n self.volume_render = VolumeRendererTaichi(\n batch_size=4096) # TODO: hard encoded batch size\n # density grid\n density_grid = torch.zeros(\n [self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(\n self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n if self.opt.density_activation == 'exp':\n self.density_activation = trunc_exp\n elif self.opt.density_activation == 'softplus':\n self.density_activation = F.softplus\n elif self.opt.density_activation == 'relu':\n self.density_activation = F.relu\n \n # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192\n def finite_difference_normal(self, x, epsilon=1e-2):\n # x: [N, 3]\n # 
ipdb.set_trace()\n dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n \n normal = torch.stack([\n 0.5 * (dx_pos - dx_neg) / epsilon, \n 0.5 * (dy_pos - dy_neg) / epsilon, \n 0.5 * (dz_pos - dz_neg) / epsilon\n ], dim=-1)\n\n return -normal\n \n def normal(self, x):\n normal = self.finite_difference_normal(x)\n normal = safe_normalize(normal)\n normal = torch.nan_to_num(normal)\n return normal\n\n @torch.no_grad()\n def density_blob(self, x):\n # x: [B, N, 3]\n\n d = (x ** 2).sum(-1)\n\n if self.opt.density_activation == 'exp':\n g = self.opt.blob_density * \\\n torch.exp(- d / (2 * self.opt.blob_radius ** 2))\n else:\n g = self.opt.blob_density * \\\n (1 - torch.sqrt(d) / self.opt.blob_radius)\n\n return g\n\n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not (self.cuda_ray or self.taichi_ray):\n return\n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=-1, S=128):\n from meshutils import decimate_mesh, clean_mesh, poisson_mesh_reconstruction\n if self.opt.dmtet:\n vertices, triangles = self.dmtet.get_verts_face()\n vertices = vertices.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n\n sigmas = np.zeros(\n [resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat(\n [xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.aabb_train.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(\n zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n logger.info(\n f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(\n vertices, triangles, remesh=True, remesh_size=0.01)\n\n # decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, 
triangles = decimate_mesh(\n vertices, triangles, decimate_target)\n\n v = torch.from_numpy(vertices).contiguous(\n ).float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(\n self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # [N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n logger.info(\n f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(\n uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if self.glctx is None:\n if h <= 2048 and w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(\n 0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(\n v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, 
f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n logger.info(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n\n logger.info(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n\n logger.info(\n f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n')\n\n logger.info(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n fp.write(\n f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n # nears.unsqueeze_(-1)\n # fars.unsqueeze_(-1)\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if self.training:\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n else:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n \n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / self.opt.num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, self.opt.num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if self.opt.upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1) ## 
confused with this, so the last point should be around relative distance or zero?\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n light_d = light_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1,3), ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n if normals is not None:\n normals = normals.view(N, -1, 3)\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n\n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n # ipdb.set_trace()\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n weights_sum = weights_sum.reshape(*prefix)\n\n if self.training:\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n normal_image = 
torch.sum(\n weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1]\n results['normal_image'] = normal_image\n \n results['image'] = image\n results['depth'] = depth\n results['weights'] = weights\n results['weights_sum'] = weights_sum\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: image: [B, N, 3], depth: [B, N]\n # ipdb.set_trace()\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if self.training:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n else:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n results = {}\n\n if self.training:\n xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n\n if light_d.shape[0] > 1:\n flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()\n light_d = light_d[flatten_rays]\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize)\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n raymarching.composite_rays(n_alive, n_step, 
rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n def get_sdf_albedo_for_init(self, points=None):\n output = self.density(self.dmtet.verts if points is None else points)\n sigma, albedo = output['sigma'], output['albedo']\n return sigma - self.density_thresh, albedo\n\n def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs):\n # mvp: [B, 4, 4]\n\n device = mvp.device\n campos = rays_o[:, 0, :] # only need one ray per batch\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3]\n\n results = {}\n\n verts, faces = self.dmtet.get_verts_face()\n\n # get normals\n i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]\n v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :]\n\n faces = faces.int()\n \n face_normals = torch.cross(v1 - v0, v2 - v0)\n face_normals = safe_normalize(face_normals)\n \n vn = torch.zeros_like(verts)\n vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals)\n\n vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device))\n\n # rasterization\n verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), \n mvp.permute(0,2,1)).float() # [B, N, 4]\n rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w))\n \n alpha, _ = dr.interpolate(torch.ones_like(verts[:, :1]).unsqueeze(0), rast, faces) # [B, H, W, 1]\n xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3]\n normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces)\n normal = safe_normalize(normal)\n\n xyzs = xyzs.view(-1, 3)\n mask = (alpha > 0).view(-1).detach()\n\n # do the lighting here since we have normal from mesh now.\n albedo = torch.zeros_like(xyzs, dtype=torch.float32)\n if mask.any():\n masked_albedo = self.density(xyzs[mask])['albedo']\n albedo[mask] = masked_albedo.float()\n albedo = albedo.view(-1, h, w, 3)\n\n if shading == 'albedo':\n color = albedo\n elif shading == 'textureless':\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = lambertian.unsqueeze(-1).repeat(1, 1, 1, 3)\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = albedo * lambertian.unsqueeze(-1)\n\n color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n alpha = dr.antialias(alpha, rast, verts_clip, 
faces).clamp(0, 1) # [B, H, W, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n if torch.is_tensor(bg_color) and len(bg_color.shape) > 1:\n bg_color = bg_color.view(-1, h, w, 3)\n \n depth = rast[:, :, :, [2]] # [B, H, W]\n color = color + (1 - alpha) * bg_color\n\n results['depth'] = depth \n results['image'] = color\n results['weights_sum'] = alpha.squeeze(-1)\n\n normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n results['normal_image'] = normal_image\n \n # regularizations\n if self.training:\n if self.opt.lambda_mesh_normal > 0:\n results['loss_normal'] = normal_consistency(\n face_normals, faces)\n if self.opt.lambda_mesh_lap > 0:\n results['loss_lap'] = laplacian_smooth_loss(verts, faces)\n\n return results\n\n def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n exp_step_factor = kwargs.get('exp_step_factor', 0.)\n MAX_SAMPLES = 1024\n NEAR_DISTANCE = 0.01\n center = torch.zeros(1, 3)\n half_size = torch.ones(1, 3)\n _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1)\n hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE\n\n # TODO: should sample different light_d for each batch... but taichi end doesn't have a flatten_ray implemented currently...\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES)\n dirs = safe_normalize(dirs)\n # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image 
= torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = hits_t[:, 0, 0]\n step = 0\n \n min_samples = 1 if exp_step_factor == 0 else 4\n\n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n # n_step = max(min(N // n_alive, 8), 1)\n n_step = max(min(N // n_alive, 64), min_samples)\n\n xyzs, dirs, deltas, ts, N_eff_samples = \\\n self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive,\n self.density_bitfield, self.cascade,\n self.bound, exp_step_factor,\n self.grid_size, MAX_SAMPLES, n_step)\n\n xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c')\n dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c')\n dirs = safe_normalize(dirs)\n valid_mask = ~torch.all(dirs == 0, dim=1)\n if valid_mask.sum() == 0:\n break\n\n sigmas = torch.zeros(len(xyzs), device=device)\n rgbs = torch.zeros(len(xyzs), 3, device=device)\n normals = torch.zeros(len(xyzs), 3, device=device)\n\n sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading)\n rgbs[valid_mask] = _rgbs.float()\n sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step)\n rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step)\n if normals is not None:\n normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step)\n\n self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive,\n kwargs.get('T_threshold', 1e-4), N_eff_samples,\n weights_sum, depth, image)\n\n rays_alive = rays_alive[rays_alive >= 0]\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not (self.cuda_ray or self.taichi_ray):\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = 
sigmas\n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n if self.cuda_ray:\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n elif self.taichi_ray:\n self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield)\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}')\n\n\n def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: pred_rgb: [B, N, 3]\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n if self.dmtet:\n results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs)\n elif self.cuda_ray:\n results = self.run_cuda(rays_o, rays_d, **kwargs)\n elif self.taichi_ray:\n results = self.run_taichi(rays_o, rays_d, **kwargs)\n else:\n if staged:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = self.run(rays_o, rays_d, **kwargs)\n\n return results\n\n def init_tet_from_nerf(self, reset_scale=True):\n sdf = self.get_sdf_from_nerf(reset_scale=reset_scale)\n self.dmtet.init_tet_from_sdf(sdf)\n logger.info(f'init dmtet from NeRF Done ...')\n\n\n @torch.no_grad()\n def get_sdf_from_nerf(self, reset_scale=True):\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh)\n else:\n density_thresh = self.density_thresh\n\n if reset_scale:\n # init scale\n sigma = self.density(self.dmtet.verts)[\n 'sigma'] # verts covers [-1, 1] now\n mask = sigma > density_thresh\n valid_verts = self.dmtet.verts[mask]\n tet_scale = valid_verts.abs().amax(dim=0) + 1e-1\n self.dmtet.reset_tet_scale(tet_scale)\n sdf = (self.density(self.dmtet.verts)[\n 'sigma'] - density_thresh).clamp(-1, 1)\n return sdf" }, { "identifier": "get_encoder", "path": "encoding.py", "snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear',\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency_torch':\n encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True)\n\n elif encoding == 'frequency': # CUDA implementation, faster than torch.\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'sphere_harmonics':\n from shencoder import SHEncoder\n encoder = 
SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n # encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners, interpolation=interpolation)\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners)\n \n elif encoding == 'hashgrid_taichi':\n from taichi_modules.hash_encoder import HashEncoderTaichi\n encoder = HashEncoderTaichi(batch_size=4096) #TODO: hard encoded batch size\n\n\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import tinycudann as tcnn from activation import trunc_exp, biased_softplus from .renderer import NeRFRenderer from encoding import get_encoder from .utils import safe_normalize
13,479
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt...
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt...
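Both code fields above are cropped before `NeRFNetwork` defines its density query, yet the renderer context earlier calls `self.common_forward(x)` (for example inside `finite_difference_normal`) and expects a `(sigma, albedo)` pair. The snippet below is a hedged approximation of the usual pattern in this family of codebases (grid features, then `sigma_net`, then a density activation plus sigmoid albedo); the stand-in modules and the omission of `density_blob` are assumptions, not the repository's code.

```python
import torch
import torch.nn as nn

# Toy stand-ins so the sketch runs without tinycudann; shapes mirror NeRFNetwork above.
bound = 1.0
encoder = nn.Linear(3, 32)        # stand-in for the tcnn HashGrid encoding (in_dim features)
sigma_net = nn.Linear(32, 4)      # stand-in for MLP(self.in_dim, 4, hidden_dim, num_layers)
density_activation = torch.exp    # stand-in for trunc_exp / biased_softplus


def common_forward_sketch(x: torch.Tensor):
    # x: [N, 3] in [-bound, bound]; tcnn hash grids expect inputs scaled to [0, 1]
    enc = encoder((x + bound) / (2 * bound))
    h = sigma_net(enc)                     # [N, 4] = 1 density channel + 3 albedo channels
    sigma = density_activation(h[..., 0])  # the real code presumably also adds density_blob(x)
    albedo = torch.sigmoid(h[..., 1:])     # colors in [0, 1]
    return sigma, albedo


sigma, albedo = common_forward_sketch(torch.rand(8, 3) * 2 - 1)
print(sigma.shape, albedo.shape)           # torch.Size([8]) torch.Size([8, 3])
```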
self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6)
2
2023-11-23 10:34:08+00:00
16k
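Rows like the one above read naturally as next-line completion examples: the context snippets and import_statement supply cross-file context, cropped_code is the in-file prefix, and next_line is the held-out target (gold_snippet_index presumably points at the context snippet the target line depends on). The exact prompt recipe is not specified by the dump, so the assembly below is only one plausible sketch using the field names shown:

def build_prompt(row):
    # cross-file context: retrieved snippets, prefixed with their identifiers
    ctx = "\n\n".join(f"# {c['identifier']}\n{c['snippet']}" for c in row["context"])
    # in-file prefix: the file's imports plus the code right before the target line
    prefix = row["import_statement"] + "\n\n" + row["cropped_code"]
    return ctx + "\n\n" + prefix

# a model's completion would then be scored against row["next_line"],
# e.g. the get_encoder('frequency', ...) line a few fields up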
alexzhou907/DreamPropeller
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
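make_beta_schedule and extract_into_tensor in the context above are exactly the pieces that DDPM.register_schedule and q_sample in this file combine into the closed-form forward process x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise. A compact sketch of that identity with the snippet's default linear schedule (hyper-parameters are the defaults from the snippet, not values taken from any config):

import numpy as np
import torch

T = 1000
# linear schedule, matching make_beta_schedule(schedule="linear", linear_start=1e-4, linear_end=2e-2)
betas = np.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, T) ** 2
alphas_cumprod = torch.tensor(np.cumprod(1.0 - betas), dtype=torch.float32)

def q_sample(x0, t, noise):
    # x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * noise, broadcast per batch element
    abar = alphas_cumprod[t].view(-1, 1, 1, 1)
    return abar.sqrt() * x0 + (1.0 - abar).sqrt() * noise

x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([10, 900])
x_t = q_sample(x0, t, torch.randn_like(x0))   # nearly clean at t=10, close to pure noise at t=900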
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
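The DDPM/LatentDiffusion classes in this file build each sub-module (first stage VAE, cond stage encoder, loss) through the instantiate_from_config util imported above and quoted in the context: a dict names a dotted class path under "target" and constructor kwargs under "params". A minimal, simplified version of that pattern (get_obj_from_str is reconstructed here since its real body is not shown above; the torch.nn.Linear target just mirrors the 772 -> 768 cc_projection used later in the file):

import importlib

def get_obj_from_str(path):
    module, cls = path.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

def instantiate_from_config(config):
    # {"target": "pkg.mod.Class", "params": {...}} -> Class(**params)
    return get_obj_from_str(config["target"])(**config.get("params", dict()))

cfg = {"target": "torch.nn.Linear", "params": {"in_features": 772, "out_features": 768}}
layer = instantiate_from_config(cfg)          # equivalent to torch.nn.Linear(772, 768)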
12,692
padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
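The uncond=0.05 masking inside get_input above realises the comment about dropping only the text conditioning 5% of the time, only the image conditioning 5%, and both 5%. Spelling out the intervals of the per-sample uniform draw makes that easy to verify (a restatement of the code above, not new behaviour):

import torch

uncond = 0.05
r = torch.tensor([0.02, 0.07, 0.12, 0.50])       # example draws, one per batch element

drop_prompt = r < 2 * uncond                      # r in [0.00, 0.10): clip_emb replaced by the null prompt in c_crossattn
drop_image  = (r >= uncond) & (r < 3 * uncond)    # r in [0.05, 0.15): the c_concat latent zeroed by input_mask
# [0.00, 0.05): crossattn only   [0.05, 0.10): both   [0.10, 0.15): concat only  -- 5% each
print(drop_prompt.tolist(), drop_image.tolist())  # [True, True, False, False] [False, True, True, False]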
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
2
2023-11-27 23:39:49+00:00
16k
CineMingle/CineMingle
Movie_Data_Capture.py
[ { "identifier": "get_data_from_json", "path": "scraper.py", "snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解析JSON返回元数据\n # :param file_number: 影片名称\n # :param open_cc: 简繁转换器\n # :param specified_source: 指定的媒体数据源\n # :param specified_url: 指定的数据查询地址, 目前未使用\n # :return 给定影片名称的具体信息\n \n try:\n actor_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_actor.xml'))\n info_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_info.xml'))\n except:\n actor_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n info_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n\n conf = config.getInstance()\n # default fetch order list, from the beginning to the end\n sources = conf.sources()\n\n # TODO 准备参数\n # - 清理 ADC_function, webcrawler\n proxies: dict = None\n config_proxy = conf.proxy()\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n\n # javdb website logic\n # javdb have suffix\n javdb_sites = conf.javdb_sites().split(',')\n for i in javdb_sites:\n javdb_sites[javdb_sites.index(i)] = \"javdb\" + i\n javdb_sites.append(\"javdb\")\n # 不加载过期的cookie,javdb登录界面显示为7天免登录,故假定cookie有效期为7天\n has_valid_cookie = False\n for cj in javdb_sites:\n javdb_site = cj\n cookie_json = javdb_site + '.json'\n cookies_dict, cookies_filepath = load_cookies(cookie_json)\n if isinstance(cookies_dict, dict) and isinstance(cookies_filepath, str):\n cdays = file_modification_days(cookies_filepath)\n if cdays < 7:\n javdb_cookies = cookies_dict\n has_valid_cookie = True\n break\n elif cdays != 9999:\n print(\n f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')\n if not has_valid_cookie:\n # get real random site from javdb_sites, because random is not really random when the seed value is known\n # 已经是没有这些随机数了\n # javdb_site = secrets.choice(javdb_sites)\n javdb_site = None\n javdb_cookies = None\n\n ca_cert = None\n if conf.cacert_file():\n ca_cert = conf.cacert_file()\n\n json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,\n dbsite=javdb_site, dbcookies=javdb_cookies,\n morestoryline=conf.is_storyline(),\n specifiedSource=specified_source, specifiedUrl=specified_url,\n debug = conf.debug())\n # Return if data not found in all sources\n if not json_data:\n print('[-]Movie Number not found!')\n return None\n\n # 增加number严格判断,避免提交任何number,总是返回\"本橋実来 ADZ335\",这种返回number不一致的数据源故障\n # 目前选用number命名规则是javdb.com Domain Creation Date: 2013-06-19T18:34:27Z\n # 然而也可以跟进关注其它命名规则例如airav.wiki Domain Creation Date: 2019-08-28T07:18:42.0Z\n # 如果将来javdb.com命名规则下不同Studio出现同名碰撞导致无法区分,可考虑更换规则,更新相应的number分析和抓取代码。\n if str(json_data.get('number')).upper() != file_number.upper():\n try:\n if json_data.get('allow_number_change'):\n pass\n except:\n print('[-]Movie number has changed! 
[{}]->[{}]'.format(file_number, str(json_data.get('number'))))\n return None\n\n # ================================================网站规则添加结束================================================\n\n if json_data.get('title') == '':\n print('[-]Movie Number or Title not found!')\n return None\n\n title = json_data.get('title')\n actor_list = str(json_data.get('actor')).strip(\"[ ]\").replace(\"'\", '').split(',') # 字符串转列表\n actor_list = [actor.strip() for actor in actor_list] # 去除空白\n director = json_data.get('director')\n release = json_data.get('release')\n number = json_data.get('number')\n studio = json_data.get('studio')\n source = json_data.get('source')\n runtime = json_data.get('runtime')\n outline = json_data.get('outline')\n label = json_data.get('label')\n series = json_data.get('series')\n year = json_data.get('year')\n\n if json_data.get('cover_small'):\n cover_small = json_data.get('cover_small')\n else:\n cover_small = ''\n\n if json_data.get('trailer'):\n trailer = json_data.get('trailer')\n else:\n trailer = ''\n\n if json_data.get('extrafanart'):\n extrafanart = json_data.get('extrafanart')\n else:\n extrafanart = ''\n\n imagecut = json_data.get('imagecut')\n tag = str(json_data.get('tag')).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '').split(',') # 字符串转列表 @\n while 'XXXX' in tag:\n tag.remove('XXXX')\n while 'xxx' in tag:\n tag.remove('xxx')\n if json_data['source'] =='pissplay': # pissplay actor为英文名,不用去除空格\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '')\n else:\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '')\n\n # if imagecut == '3':\n # DownloadFileWithFilename()\n\n # ====================处理异常字符====================== #\\/:*?\"<>|\n actor = special_characters_replacement(actor)\n actor_list = [special_characters_replacement(a) for a in actor_list]\n title = special_characters_replacement(title)\n label = special_characters_replacement(label)\n outline = special_characters_replacement(outline)\n series = special_characters_replacement(series)\n studio = special_characters_replacement(studio)\n director = special_characters_replacement(director)\n tag = [special_characters_replacement(t) for t in tag]\n release = release.replace('/', '-')\n tmpArr = cover_small.split(',')\n if len(tmpArr) > 0:\n cover_small = tmpArr[0].strip('\\\"').strip('\\'')\n # ====================处理异常字符 END================== #\\/:*?\"<>|\n\n # 返回处理后的json_data\n json_data['title'] = title\n json_data['original_title'] = title\n json_data['actor'] = actor\n json_data['release'] = release\n json_data['cover_small'] = cover_small\n json_data['tag'] = tag\n json_data['year'] = year\n json_data['actor_list'] = actor_list\n json_data['trailer'] = trailer\n json_data['extrafanart'] = extrafanart\n json_data['label'] = label\n json_data['outline'] = outline\n json_data['series'] = series\n json_data['studio'] = studio\n json_data['director'] = director\n\n if conf.is_translate():\n translate_values = conf.translate_values().split(\",\")\n for translate_value in translate_values:\n if json_data[translate_value] == \"\":\n continue\n if translate_value == \"title\":\n title_dict = json.loads(\n (Path.home() / '.local' / 'share' / 'mdc' / 'c_number.json').read_text(encoding=\"utf-8\"))\n try:\n json_data[translate_value] = title_dict[number]\n continue\n except:\n pass\n if conf.get_translate_engine() == \"azure\":\n t = translate(\n json_data[translate_value],\n target_language=\"zh-Hans\",\n engine=conf.get_translate_engine(),\n key=conf.get_translate_key(),\n )\n 
else:\n if len(json_data[translate_value]):\n if type(json_data[translate_value]) == str:\n json_data[translate_value] = special_characters_replacement(json_data[translate_value])\n json_data[translate_value] = translate(json_data[translate_value])\n else:\n for i in range(len(json_data[translate_value])):\n json_data[translate_value][i] = special_characters_replacement(\n json_data[translate_value][i])\n list_in_str = \",\".join(json_data[translate_value])\n json_data[translate_value] = translate(list_in_str).split(',')\n\n if open_cc:\n cc_vars = conf.cc_convert_vars().split(\",\")\n ccm = conf.cc_convert_mode()\n\n def convert_list(mapping_data, language, vars):\n total = []\n for i in vars:\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")) != 0:\n i = mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")[0]\n total.append(i)\n return total\n\n def convert(mapping_data, language, vars):\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)) != 0:\n return mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)[0]\n else:\n raise IndexError('keyword not found')\n\n for cc in cc_vars:\n if json_data[cc] == \"\" or len(json_data[cc]) == 0:\n continue\n if cc == \"actor\":\n try:\n if ccm == 1:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_cn\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_cn\", json_data['actor'])\n elif ccm == 2:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_tw\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_tw\", json_data['actor'])\n elif ccm == 3:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"jp\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"jp\", json_data['actor'])\n except:\n json_data['actor_list'] = [open_cc.convert(aa) for aa in json_data['actor_list']]\n json_data['actor'] = open_cc.convert(json_data['actor'])\n elif cc == \"tag\":\n try:\n if ccm == 1:\n json_data[cc] = convert_list(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert_list(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert_list(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n except:\n json_data[cc] = [open_cc.convert(t) for t in json_data[cc]]\n else:\n try:\n if ccm == 1:\n json_data[cc] = convert(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n except IndexError:\n json_data[cc] = open_cc.convert(json_data[cc])\n except:\n pass\n\n naming_rule = \"\"\n original_naming_rule = \"\"\n for i in conf.naming_rule().split(\"+\"):\n if i not in json_data:\n naming_rule += i.strip(\"'\").strip('\"')\n original_naming_rule += i.strip(\"'\").strip('\"')\n else:\n item = json_data.get(i)\n naming_rule += item if type(item) is not list else \"&\".join(item)\n # 
PATCH:处理[title]存在翻译的情况,后续NFO文件的original_name只会直接沿用naming_rule,这导致original_name非原始名\n # 理应在翻译处处理 naming_rule和original_naming_rule\n if i == 'title':\n item = json_data.get('original_title')\n original_naming_rule += item if type(item) is not list else \"&\".join(item)\n\n json_data['naming_rule'] = naming_rule\n json_data['original_naming_rule'] = original_naming_rule\n return json_data" }, { "identifier": "file_modification_days", "path": "ADC_function.py", "snippet": "def file_modification_days(filename: str) -> int:\n \"\"\"\n 文件修改时间距此时的天数\n \"\"\"\n mfile = Path(filename)\n if not mfile.is_file():\n return 9999\n mtime = int(mfile.stat().st_mtime)\n now = int(time.time())\n days = int((now - mtime) / (24 * 60 * 60))\n if days < 0:\n return 9999\n return days" }, { "identifier": "get_html", "path": "ADC_function.py", "snippet": "def get_html(url, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None, json_headers=None):\n \"\"\"\n 网页请求核心函数\n \"\"\"\n verify = config.getInstance().cacert_file()\n config_proxy = config.getInstance().proxy()\n errors = \"\"\n\n headers = {\"User-Agent\": ua or G_USER_AGENT} # noqa\n if json_headers is not None:\n headers.update(json_headers)\n\n for i in range(config_proxy.retry):\n try:\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, proxies=proxies,\n verify=verify,\n cookies=cookies)\n else:\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, cookies=cookies)\n\n if return_type == \"object\":\n return result\n elif return_type == \"content\":\n return result.content\n else:\n result.encoding = encoding or result.apparent_encoding\n return result.text\n except Exception as e:\n print(\"[-]Connect retry {}/{}\".format(i + 1, config_proxy.retry))\n errors = str(e)\n if \"getaddrinfo failed\" in errors:\n print(\"[-]Connect Failed! Please Check your proxy config\")\n debug = config.getInstance().debug()\n if debug:\n print(\"[-]\" + errors)\n else:\n print(\"[-]\" + errors)\n print('[-]Connect Failed! 
Please check your Proxy or Network!')\n raise Exception('Connect Failed')" }, { "identifier": "parallel_download_files", "path": "ADC_function.py", "snippet": "def parallel_download_files(dn_list: typing.Iterable[typing.Sequence], parallel: int = 0, json_headers=None):\n \"\"\"\n download files in parallel 多线程下载文件\n\n 用法示例: 2线程同时下载两个不同文件,并保存到不同路径,路径目录可未创建,但需要具备对目标目录和文件的写权限\n parallel_download_files([\n ('https://site1/img/p1.jpg', 'C:/temp/img/p1.jpg'),\n ('https://site2/cover/n1.xml', 'C:/tmp/cover/n1.xml')\n ])\n\n :dn_list: 可以是 tuple或者list: ((url1, save_fullpath1),(url2, save_fullpath2),) fullpath可以是str或Path\n :parallel: 并行下载的线程池线程数,为0则由函数自己决定\n \"\"\"\n mp_args = []\n for url, fullpath in dn_list:\n if url and isinstance(url, str) and url.startswith('http') \\\n and fullpath and isinstance(fullpath, (str, Path)) and len(str(fullpath)):\n fullpath = Path(fullpath)\n fullpath.parent.mkdir(parents=True, exist_ok=True)\n mp_args.append((url, fullpath, json_headers))\n if not len(mp_args):\n return []\n if not isinstance(parallel, int) or parallel not in range(1, 200):\n parallel = min(5, len(mp_args))\n with ThreadPoolExecutor(parallel) as pool:\n results = list(pool.map(download_one_file, mp_args))\n return results" }, { "identifier": "get_number", "path": "number_parser.py", "snippet": "def get_number(debug: bool, file_path: str) -> str:\n \"\"\"\n 从文件路径中提取番号 from number_parser import get_number\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/[脸肿字幕组][PoRO]牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」[720p][x264_aac].mp4\")\n '牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829-C.mp4\")\n 'snis-829'\n \"\"\"\n filepath = os.path.basename(file_path)\n # debug True 和 False 两块代码块合并,原因是此模块及函数只涉及字符串计算,没有IO操作,debug on时输出导致异常信息即可\n try:\n file_number = get_number_by_dict(filepath)\n if file_number:\n return file_number\n elif '字幕组' in filepath or 'SUB' in filepath.upper() or re.match(r'[\\u30a0-\\u30ff]+', filepath):\n filepath = G_spat.sub(\"\", filepath)\n filepath = re.sub(\"\\[.*?\\]\",\"\",filepath)\n filepath = filepath.replace(\".chs\", \"\").replace(\".cht\", \"\")\n file_number = str(re.findall(r'(.+?)\\.', filepath)).strip(\" [']\")\n return file_number\n elif '-' in filepath or '_' in filepath: # 普通提取番号 主要处理包含减号-和_的番号\n filepath = G_spat.sub(\"\", filepath)\n filename = str(re.sub(\"\\[\\d{4}-\\d{1,2}-\\d{1,2}\\] - \", \"\", filepath)) # 去除文件名中时间\n lower_check = filename.lower()\n if 'fc2' in lower_check:\n filename = lower_check.replace('--', '-').replace('_', '-').upper()\n filename = re.sub(\"[-_]cd\\d{1,2}\", \"\", filename, flags=re.IGNORECASE)\n if not re.search(\"-|_\", filename): # 去掉-CD1之后再无-的情况,例如n1012-CD1.wmv\n return str(re.search(r'\\w+', filename[:filename.find('.')], re.A).group())\n file_number = os.path.splitext(filename)\n print(file_number)\n filename = re.search(r'[\\w\\-_]+', filename, re.A)\n if filename:\n file_number = str(filename.group())\n 
else:\n file_number = file_number[0]\n file_number = re.sub(\"(-|_)c$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)uc$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)u$\", \"\", file_number, flags=re.IGNORECASE)\n if re.search(\"\\d+ch$\", file_number, flags=re.I):\n file_number = file_number[:-2]\n return file_number.upper()\n else: # 提取不含减号-的番号,FANZA CID\n # 欧美番号匹配规则\n oumei = re.search(r'[a-zA-Z]+\\.\\d{2}\\.\\d{2}\\.\\d{2}', filepath)\n if oumei:\n return oumei.group()\n try:\n return str(\n re.findall(r'(.+?)\\.',\n str(re.search('([^<>/\\\\\\\\|:\"\"\\\\*\\\\?]+)\\\\.\\\\w+$', filepath).group()))).strip(\n \"['']\").replace('_', '-')\n except:\n return str(re.search(r'(.+?)\\.', filepath)[0])\n except Exception as e:\n if debug:\n print(f'[-]Number Parser exception: {e} [{file_path}]')\n return None" }, { "identifier": "core_main", "path": "core.py", "snippet": "def core_main(movie_path, number_th, oCC, specified_source=None, specified_url=None):\n conf = config.getInstance()\n # =======================================================================初始化所需变量\n multi_part = False\n part = ''\n leak_word = ''\n c_word = ''\n cn_sub = False\n liuchu = False\n hack = False\n hack_word = ''\n _4k = False\n\n # 下面被注释的变量不需要\n # rootpath = os.getcwd\n number = number_th\n json_data = get_data_from_json(number, oCC, specified_source, specified_url) # 定义番号\n\n # Return if blank dict returned (data not found)\n if not json_data:\n moveFailedFolder(movie_path)\n return\n\n if json_data[\"number\"] != number:\n # fix issue #119\n # the root cause is we normalize the search id\n # print_files() will use the normalized id from website,\n # but paste_file_to_folder() still use the input raw search id\n # so the solution is: use the normalized search id\n number = json_data[\"number\"]\n imagecut = json_data.get('imagecut')\n tag = json_data.get('tag')\n # =======================================================================判断-C,-CD后缀\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n multi_part = True\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n\n # 判断是否无码\n unce = json_data.get('无码')\n uncensored = int(unce) if isinstance(unce, bool) else int(is_uncensored(number))\n\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n liuchu = '流出'\n leak = True\n leak_word = '-无码流出' # 流出影片后缀\n else:\n leak = False\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n if '4k'.upper() in str(movie_path).upper() or '4k' in movie_path:\n _4k = True\n\n # 判断是否4k\n if '4K' in tag:\n tag.remove('4K') # 从tag中移除'4K'\n\n # 判断是否为无码破解\n if '无码破解' in tag:\n tag.remove('无码破解') # 从tag中移除'无码破解'\n\n # try:\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n\n # 调试模式检测\n if conf.debug():\n debug_print(json_data)\n\n # 创建文件夹\n # path = create_folder(rootpath + '/' + conf.success_folder(), json_data.get('location_rule'), json_data)\n\n cover = json_data.get('cover')\n ext = image_ext(cover)\n\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = 
f\"{number}{leak_word}{c_word}{hack_word}-fanart{ext}\"\n poster_path = f\"{number}{leak_word}{c_word}{hack_word}-poster{ext}\"\n thumb_path = f\"{number}{leak_word}{c_word}{hack_word}-thumb{ext}\"\n\n # main_mode\n # 1: 刮削模式 / Scraping mode\n # 2: 整理模式 / Organizing mode\n # 3:不改变路径刮削\n if conf.main_mode() == 1:\n # 创建文件夹\n path = create_folder(json_data)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, thumb_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 移动电影\n paste_file_to_folder(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_status = move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n if move_status:\n cn_sub = True\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, thumb_path), cn_sub, leak, uncensored,\n hack, _4k)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path, tag,\n json_data.get('actor_list'), liuchu, uncensored, hack, hack_word\n , _4k, fanart_path, poster_path, thumb_path)\n\n elif conf.main_mode() == 2:\n # 创建文件夹\n path = create_folder(json_data)\n # 移动文件\n paste_file_to_folder_mode2(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n elif conf.main_mode() == 3:\n path = str(Path(movie_path).parent)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if 
conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, fanart_path), cn_sub, leak, uncensored, hack,\n _4k)\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path,\n tag, json_data.get('actor_list'), liuchu, uncensored, hack, hack_word, _4k, fanart_path, poster_path,\n thumb_path)" }, { "identifier": "core_main_no_net_op", "path": "core.py", "snippet": "def core_main_no_net_op(movie_path, number):\n conf = config.getInstance()\n part = ''\n leak_word = ''\n leak = False\n c_word = ''\n cn_sub = False\n hack = False\n hack_word = ''\n _4k = False\n imagecut = 1\n multi = False\n part = ''\n path = str(Path(movie_path).parent)\n\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n multi = True\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path or \".chs\" in movie_path or '.cht' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n uncensored = True if is_uncensored(number) else 0\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n leak_word = '-无码流出' # 无码流出影片后缀\n leak = True\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n # try:\n\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n prestr = f\"{number}{leak_word}{c_word}{hack_word}\"\n\n full_nfo = Path(path) / f\"{prestr}{part}.nfo\"\n if full_nfo.is_file():\n if full_nfo.read_text(encoding='utf-8').find(r'<tag>无码</tag>') >= 0:\n uncensored = True\n try:\n nfo_xml = etree.parse(full_nfo)\n nfo_fanart_path = nfo_xml.xpath('//fanart/text()')[0]\n ext = Path(nfo_fanart_path).suffix\n except:\n return\n else:\n return\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = f\"{prestr}-fanart{ext}\"\n poster_path = f\"{prestr}-poster{ext}\"\n thumb_path = f\"{prestr}-thumb{ext}\"\n full_fanart_path = os.path.join(path, fanart_path)\n full_poster_path = os.path.join(path, poster_path)\n full_thumb_path = os.path.join(path, thumb_path)\n\n if not all(os.path.isfile(f) for f in (full_fanart_path, full_thumb_path)):\n return\n\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n if conf.is_watermark():\n add_mark(full_poster_path, full_thumb_path, cn_sub, leak, 
uncensored, hack, _4k)\n\n if multi and conf.jellyfin_multi_part_fanart():\n linkImage(path, number, part, leak_word, c_word, hack_word, ext)" }, { "identifier": "moveFailedFolder", "path": "core.py", "snippet": "def moveFailedFolder(filepath):\n conf = config.getInstance()\n failed_folder = conf.failed_folder()\n link_mode = conf.link_mode()\n # 模式3或软连接,改为维护一个失败列表,启动扫描时加载用于排除该路径,以免反复处理\n # 原先的创建软连接到失败目录,并不直观,不方便找到失败文件位置,不如直接记录该文件路径\n if conf.main_mode() == 3 or link_mode:\n ftxt = os.path.abspath(os.path.join(failed_folder, 'failed_list.txt'))\n print(\"[-]Add to Failed List file, see '%s'\" % ftxt)\n with open(ftxt, 'a', encoding='utf-8') as flt:\n flt.write(f'{filepath}\\n')\n elif conf.failed_move() and not link_mode:\n failed_name = os.path.join(failed_folder, os.path.basename(filepath))\n mtxt = os.path.abspath(os.path.join(failed_folder, 'where_was_i_before_being_moved.txt'))\n print(\"'[-]Move to Failed output folder, see '%s'\" % mtxt)\n with open(mtxt, 'a', encoding='utf-8') as wwibbmt:\n tmstr = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n wwibbmt.write(f'{tmstr} FROM[{filepath}]TO[{failed_name}]\\n')\n try:\n if os.path.exists(failed_name):\n print('[-]File Exists while moving to FailedFolder')\n return\n shutil.move(filepath, failed_name)\n except:\n print('[-]File Moving to FailedFolder unsuccessful!')" }, { "identifier": "debug_print", "path": "core.py", "snippet": "def debug_print(data: json):\n try:\n print(\"[+] ------- DEBUG INFO -------\")\n for i, v in data.items():\n if i == 'outline':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'characters')\n continue\n if i == 'actor_photo' or i == 'year':\n continue\n if i == 'extrafanart':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'links')\n continue\n print(f'[+] - {i:<{cn_space(i, 19)}} : {v}')\n\n print(\"[+] ------- DEBUG INFO -------\")\n except:\n pass" } ]
import argparse import json import os import random import re import sys import time import shutil import typing import urllib3 import signal import platform import config from datetime import datetime, timedelta from lxml import etree from pathlib import Path from opencc import OpenCC from scraper import get_data_from_json from ADC_function import file_modification_days, get_html, parallel_download_files from number_parser import get_number from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print
13,407
if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue number = get_number(False, f.stem) if not number: continue skip_numbers.add(number.lower()) rm_list = [] for f in total: n_number = get_number(False, os.path.basename(f)) if n_number and n_number.lower() in skip_numbers: rm_list.append(f) for f in rm_list: total.remove(f) if debug: print(f"[!]Skip file successfully processed within {nfo_skip_days} days: '{f}'") if len(rm_list): print( f"[!]Skip {len(rm_list)} movies in success folder '{success_folder}' who's .nfo modified within {nfo_skip_days} days.") return total def create_failed_folder(failed_folder: str): """ 新建failed文件夹 """ if not os.path.exists(failed_folder): try: os.makedirs(failed_folder) except: print(f"[-]Fatal error! Can not make folder '{failed_folder}'") os._exit(0) def rm_empty_folder(path): """ Recursively removes empty folders from a given path. This function is useful for cleaning up the directory structure by removing folders that no longer contain any files. :param path: The path where empty folders will be searched for and removed. """ abspath = os.path.abspath(path) deleted = set() for current_dir, subdirs, files in os.walk(abspath, topdown=False): try: still_has_subdirs = any(_ for subdir in subdirs if os.path.join(current_dir, subdir) not in deleted) if not any(files) and not still_has_subdirs and not os.path.samefile(path, current_dir): os.rmdir(current_dir) deleted.add(current_dir) print('[+]Deleting empty folder', current_dir) except: pass def create_data_and_move(movie_path: str, zero_op: bool, no_net_op: bool, oCC): """ Processes a movie file, generates necessary data, and moves the file to an appropriate directory based on the outcome. This function is central to the application's file processing logic, including scraping, organizing, and error handling. :param movie_path: Path of the movie file to be processed. :param zero_op: A boolean flag indicating whether to perform a dry run (no actual file operations). 
:param no_net_op: A boolean flag to indicate whether network operations are to be skipped. :param oCC: An OpenCC instance for language conversion, if required. """ # Normalized number, eg: 111xxx-222.mp4 -> xxx-222.mp4 skip_file_names = config.getInstance().skip_file_names() debug = config.getInstance().debug() n_number = get_number(debug, os.path.basename(movie_path)) movie_path = os.path.abspath(movie_path) # print(movie_path) for skip_name in skip_file_names: if skip_name in movie_path: print('[+]Skipping file:{}'.format(movie_path)) return if debug is True: print(f"[!] [{n_number}] As Number Processing for '{movie_path}'") if zero_op: return if n_number: if no_net_op: core_main_no_net_op(movie_path, n_number) else:
def check_update(local_version): """ Check for updates by comparing the local version of the application with the latest version available on GitHub. It fetches the latest release information from GitHub and compares the version numbers. If a new version is available, it prints out the update information. :param local_version: The current local version of the application. """ htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") data = json.loads(htmlcode) remote = int(data["tag_name"].replace(".", "")) local_version = int(local_version.replace(".", "")) if local_version < remote: print("[*]" + ("* New update " + str(data["tag_name"]) + " *").center(54)) print("[*]" + "↓ Download ↓".center(54)) print("[*]https://github.com/CineMingle/CineMingle/releases") print("[*]======================================================") def argparse_function(ver: str) -> typing.Tuple[str, str, str, str, bool, bool, str, str]: """ Parses command-line arguments and returns the parsed values. It sets up the argument parser with various options for the application and returns the parsed arguments and their values. It also loads configuration from a config file. :param ver: The version of the application, used for the version argument. :return: A tuple containing various parsed arguments and flags. """ conf = config.getInstance() parser = argparse.ArgumentParser(epilog=f"Load Config file '{conf.ini_path}'.") parser.add_argument("file", default='', nargs='?', help="Single Movie file path.") parser.add_argument("-p", "--path", default='movies', nargs='?', help="Analysis folder path.") parser.add_argument("-m", "--main-mode", default='', nargs='?', help="Main mode. 1:Scraping 2:Organizing 3:Scraping in analysis folder") parser.add_argument("-n", "--number", default='', nargs='?', help="Custom file number of single movie file.") # parser.add_argument("-C", "--config", default='config.ini', nargs='?', help="The config file Path.") parser.add_argument("-L", "--link-mode", default='', nargs='?', help="Create movie file link. 0:moving movie file, do not create link 1:soft link 2:try hard link first") default_logdir = str(Path.home() / '.mlogs') parser.add_argument("-o", "--log-dir", dest='logdir', default=default_logdir, nargs='?', help=f"""Duplicate stdout and stderr to logfiles in logging folder, default on. default folder for current user: '{default_logdir}'. Change default folder to an empty file, or use --log-dir= to turn log off.""") parser.add_argument("-q", "--regex-query", dest='regexstr', default='', nargs='?', help="python re module regex filepath filtering.") parser.add_argument("-d", "--nfo-skip-days", dest='days', default='', nargs='?', help="Override nfo_skip_days value in config.") parser.add_argument("-c", "--stop-counter", dest='cnt', default='', nargs='?', help="Override stop_counter value in config.") parser.add_argument("-R", "--rerun-delay", dest='delaytm', default='', nargs='?', help="Delay (eg. 1h10m30s or 60 (second)) time and rerun, until all movies proceed. 
Note: stop_counter value in config or -c must none zero.") parser.add_argument("-i", "--ignore-failed-list", action="store_true", help="Ignore failed list '{}'".format( os.path.join(os.path.abspath(conf.failed_folder()), 'failed_list.txt'))) parser.add_argument("-a", "--auto-exit", action="store_true", help="Auto exit after program complete") parser.add_argument("-g", "--debug", action="store_true", help="Turn on debug mode to generate diagnostic log for issue report.") parser.add_argument("-N", "--no-network-operation", action="store_true", help="No network query, do not get metadata, for cover cropping purposes, only takes effect when main mode is 3.") parser.add_argument("-w", "--website", dest='site', default='', nargs='?', help="Override [priority]website= in config.") parser.add_argument("-D", "--download-images", dest='dnimg', action="store_true", help="Override [common]download_only_missing_images=0 force invoke image downloading.") parser.add_argument("-C", "--config-override", dest='cfgcmd', action='append', nargs=1, help="Common use config override. Grammar: section:key=value[;[section:]key=value] eg. 'de:s=1' or 'debug_mode:switch=1' override[debug_mode]switch=1 Note:this parameters can be used multiple times") parser.add_argument("-z", "--zero-operation", dest='zero_op', action="store_true", help="""Only show job list of files and numbers, and **NO** actual operation is performed. It may help you correct wrong numbers before real job.""") parser.add_argument("-v", "--version", action="version", version=ver) parser.add_argument("-s", "--search", default='', nargs='?', help="Search number") parser.add_argument("-ss", "--specified-source", default='', nargs='?', help="specified Source.") parser.add_argument("-su", "--specified-url", default='', nargs='?', help="specified Url.") args = parser.parse_args() def set_natural_number_or_none(sk, value): if isinstance(value, str) and value.isnumeric() and int(value) >= 0: conf.set_override(f'{sk}={value}') def set_str_or_none(sk, value): if isinstance(value, str) and len(value): conf.set_override(f'{sk}={value}') def set_bool_or_none(sk, value): if isinstance(value, bool) and value: conf.set_override(f'{sk}=1') set_natural_number_or_none("common:main_mode", args.main_mode) set_natural_number_or_none("common:link_mode", args.link_mode) set_str_or_none("common:source_folder", args.path) set_bool_or_none("common:auto_exit", args.auto_exit) set_natural_number_or_none("common:nfo_skip_days", args.days) set_natural_number_or_none("advenced_sleep:stop_counter", args.cnt) set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list) set_str_or_none("advenced_sleep:rerun_delay", args.delaytm) set_str_or_none("priority:website", args.site) if isinstance(args.dnimg, bool) and args.dnimg: conf.set_override("common:download_only_missing_images=0") set_bool_or_none("debug_mode:switch", args.debug) if isinstance(args.cfgcmd, list): for cmd in args.cfgcmd: conf.set_override(cmd[0]) no_net_op = False if conf.main_mode() == 3: no_net_op = args.no_network_operation if no_net_op: conf.set_override("advenced_sleep:stop_counter=0;advenced_sleep:rerun_delay=0s;face:aways_imagecut=1") return args.file, args.number, args.logdir, args.regexstr, args.zero_op, no_net_op, args.search, args.specified_source, args.specified_url class OutLogger(object): def __init__(self, logfile) -> None: self.term = sys.stdout self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def __del__(self): self.close() def __enter__(self): pass def 
__exit__(self, *args): self.close() def write(self, msg): self.term.write(msg) self.log.write(msg) def flush(self): if 'flush' in dir(self.term): self.term.flush() if 'flush' in dir(self.log): self.log.flush() if 'fileno' in dir(self.log): os.fsync(self.log.fileno()) def close(self): if self.term is not None: sys.stdout = self.term self.term = None if self.log is not None: self.log.close() self.log = None class ErrLogger(OutLogger): def __init__(self, logfile) -> None: self.term = sys.stderr self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def close(self): if self.term is not None: sys.stderr = self.term self.term = None if self.log is not None: self.log.close() self.log = None def dupe_stdout_to_logfile(logdir: str): """ Duplicates the standard output (stdout) and standard error (stderr) to log files. This function creates log files in the specified directory and redirects stdout and stderr to these files for logging purposes. :param logdir: The directory where log files will be created and saved. """ if not isinstance(logdir, str) or len(logdir) == 0: return log_dir = Path(logdir) if not log_dir.exists(): try: log_dir.mkdir(parents=True, exist_ok=True) except: pass if not log_dir.is_dir(): return # Tips for disabling logs by change directory to a same name empty regular file abslog_dir = log_dir.resolve() log_tmstr = datetime.now().strftime("%Y%m%dT%H%M%S") logfile = abslog_dir / f'mdc_{log_tmstr}.txt' errlog = abslog_dir / f'mdc_{log_tmstr}_err.txt' sys.stdout = OutLogger(logfile) sys.stderr = ErrLogger(errlog) def close_logfile(logdir: str): """ Closes the log files and restores standard output and error streams. This function is typically called at the end of the application to ensure that log files are properly closed. :param logdir: The directory where log files are saved. 
""" if not isinstance(logdir, str) or len(logdir) == 0 or not os.path.isdir(logdir): return # 日志关闭前保存日志路径 filepath = None try: filepath = sys.stdout.filepath except: pass sys.stdout.close() sys.stderr.close() log_dir = Path(logdir).resolve() if isinstance(filepath, Path): print(f"Log file '{filepath}' saved.") assert (filepath.parent.samefile(log_dir)) # 清理空文件 for f in log_dir.glob(r'*_err.txt'): if f.stat().st_size == 0: try: f.unlink(missing_ok=True) except: pass # 合并日志 只检测日志目录内的文本日志,忽略子目录。三天前的日志,按日合并为单个日志,三个月前的日志, # 按月合并为单个月志,去年及以前的月志,今年4月以后将之按年合并为年志 # 测试步骤: """ LOGDIR=/tmp/mlog mkdir -p $LOGDIR for f in {2016..2020}{01..12}{01..28};do;echo $f>$LOGDIR/mdc_${f}T235959.txt;done for f in {01..09}{01..28};do;echo 2021$f>$LOGDIR/mdc_2021${f}T235959.txt;done for f in {00..23};do;echo 20211001T$f>$LOGDIR/mdc_20211001T${f}5959.txt;done echo "$(ls -1 $LOGDIR|wc -l) files in $LOGDIR" # 1932 files in /tmp/mlog mdc -zgic1 -d0 -m3 -o $LOGDIR # python3 ./Movie_Data_Capture.py -zgic1 -o $LOGDIR ls $LOGDIR # rm -rf $LOGDIR """ today = datetime.today() # 第一步,合并到日。3天前的日志,文件名是同一天的合并为一份日志 for i in range(1): txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}T\d{6}$', f.stem, re.A)] if not txts or not len(txts): break e = [f for f in txts if '_err' in f.stem] txts.sort() tmstr_3_days_ago = (today.replace(hour=0) - timedelta(days=3)).strftime("%Y%m%dT99") deadline_day = f'mdc_{tmstr_3_days_ago}' day_merge = [f for f in txts if f.stem < deadline_day] if not day_merge or not len(day_merge): break cutday = len('T235959.txt') # cut length mdc_20201201|T235959.txt for f in day_merge: try: day_file_name = str(f)[:-cutday] + '.txt' # mdc_20201201.txt with open(day_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第二步,合并到月 for i in range(1): # 利用1次循环的break跳到第二步,避免大块if缩进或者使用goto语法 txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}$', f.stem, re.A)] if not txts or not len(txts): break txts.sort() tmstr_3_month_ago = (today.replace(day=1) - timedelta(days=3 * 30)).strftime("%Y%m32") deadline_month = f'mdc_{tmstr_3_month_ago}' month_merge = [f for f in txts if f.stem < deadline_month] if not month_merge or not len(month_merge): break tomonth = len('01.txt') # cut length mdc_202012|01.txt for f in month_merge: try: month_file_name = str(f)[:-tomonth] + '.txt' # mdc_202012.txt with open(month_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第三步,月合并到年 for i in range(1): if today.month < 4: break mons = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{6}$', f.stem, re.A)] if not mons or not len(mons): break mons.sort() deadline_year = f'mdc_{today.year - 1}13' year_merge = [f for f in mons if f.stem < deadline_year] if not year_merge or not len(year_merge): break toyear = len('12.txt') # cut length mdc_2020|12.txt for f in year_merge: try: year_file_name = str(f)[:-toyear] + '.txt' # mdc_2020.txt with open(year_file_name, 'a', encoding='utf-8') as y: y.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第四步,压缩年志 如果有压缩需求,请自行手工压缩,或者使用外部脚本来定时完成。推荐nongnu的lzip,对于 # 这种粒度的文本日志,压缩比是目前最好的。lzip -9的运行参数下,日志压缩比要高于xz -9,而且内存占用更少, # 多核利用率更高(plzip多线程版本),解压速度更快。压缩后的大小差不多是未压缩时的2.4%到3.7%左右, # 100MB的日志文件能缩小到3.7MB。 return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). 
It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # 新增失败文件列表跳过处理,及.nfo修改天数跳过处理,提示跳过视频总数,调试模式(-g)下详细被跳过文件,跳过小广告 def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # 检查去重并写回,但是不改变failed_list.txt内条目的先后次序,重复的只保留最后的 fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo 
which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue number = get_number(False, f.stem) if not number: continue skip_numbers.add(number.lower()) rm_list = [] for f in total: n_number = get_number(False, os.path.basename(f)) if n_number and n_number.lower() in skip_numbers: rm_list.append(f) for f in rm_list: total.remove(f) if debug: print(f"[!]Skip file successfully processed within {nfo_skip_days} days: '{f}'") if len(rm_list): print( f"[!]Skip {len(rm_list)} movies in success folder '{success_folder}' who's .nfo modified within {nfo_skip_days} days.") return total def create_failed_folder(failed_folder: str): """ 新建failed文件夹 """ if not os.path.exists(failed_folder): try: os.makedirs(failed_folder) except: print(f"[-]Fatal error! Can not make folder '{failed_folder}'") os._exit(0) def rm_empty_folder(path): """ Recursively removes empty folders from a given path. This function is useful for cleaning up the directory structure by removing folders that no longer contain any files. :param path: The path where empty folders will be searched for and removed. """ abspath = os.path.abspath(path) deleted = set() for current_dir, subdirs, files in os.walk(abspath, topdown=False): try: still_has_subdirs = any(_ for subdir in subdirs if os.path.join(current_dir, subdir) not in deleted) if not any(files) and not still_has_subdirs and not os.path.samefile(path, current_dir): os.rmdir(current_dir) deleted.add(current_dir) print('[+]Deleting empty folder', current_dir) except: pass def create_data_and_move(movie_path: str, zero_op: bool, no_net_op: bool, oCC): """ Processes a movie file, generates necessary data, and moves the file to an appropriate directory based on the outcome. This function is central to the application's file processing logic, including scraping, organizing, and error handling. :param movie_path: Path of the movie file to be processed. :param zero_op: A boolean flag indicating whether to perform a dry run (no actual file operations). :param no_net_op: A boolean flag to indicate whether network operations are to be skipped. :param oCC: An OpenCC instance for language conversion, if required. """ # Normalized number, eg: 111xxx-222.mp4 -> xxx-222.mp4 skip_file_names = config.getInstance().skip_file_names() debug = config.getInstance().debug() n_number = get_number(debug, os.path.basename(movie_path)) movie_path = os.path.abspath(movie_path) # print(movie_path) for skip_name in skip_file_names: if skip_name in movie_path: print('[+]Skipping file:{}'.format(movie_path)) return if debug is True: print(f"[!] [{n_number}] As Number Processing for '{movie_path}'") if zero_op: return if n_number: if no_net_op: core_main_no_net_op(movie_path, n_number) else:
core_main(movie_path, n_number, oCC)
5
2023-11-25 03:16:13+00:00
16k
abdulhaim/LMRL-Gym
llm_rl_scripts/maze/mc_returns/train_mc.py
[ { "identifier": "Text", "path": "LLM_RL/environment.py", "snippet": "class Text:\nclass TextTrajectory:\nclass TextTrajectoryChain:\nclass TextEnv(ABC):\nclass BatchedTextEnv(ABC):\nclass TextEnvToBatchedTextEnv(BatchedTextEnv):\nclass BatchedTextEnvToTextEnv(TextEnv):\nclass TextPolicy(ABC):\nclass BatchedTextPolicy(ABC):\nclass TextPolicyToBatchedTextPolicy(BatchedTextPolicy):\nclass BatchedTextPolicyToTextPolicy(TextPolicy):\nclass InteractionTransition(NamedTuple):\nclass UserPolicy(TextPolicy): \nclass TokenHistory:\nclass TokenTrajectory:\nclass TokenTrajectoryChain:\n def __post_init__(self):\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n def reset(self, seed: Optional[int]=None, options: Optional[Dict]=None) -> TextHistory:\n def close(self) -> None:\n def copy(self) -> TextEnv:\n def step(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[Tuple[TextHistory, float, bool]]]:\n def reset(self, seed: Optional[List[Optional[int]]]=None, options: Optional[List[Optional[Dict]]]=None) -> List[TextHistory]:\n def close(self) -> None:\n def copy(self) -> BatchedTextEnv:\n def __init__(self, env: TextEnv):\n def step(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[Tuple[TextHistory, float, bool]]]:\n def reset(self, seed: Optional[List[Optional[int]]]=None, options: Optional[List[Optional[Dict]]]=None) -> List[TextHistory]:\n def close(self) -> None:\n def __init__(self, env: BatchedTextEnv):\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n def reset(self, seed: Optional[int]=None, options: Optional[Dict]=None) -> TextHistory:\n def close(self) -> None:\n def act(self, text_history: TextHistory) -> TextHistory:\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def __init__(self, policy: TextPolicy):\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def __init__(self, policy: BatchedTextPolicy):\n def act(self, text_history: TextHistory) -> TextHistory:\ndef interact_environment(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n initial_text_history: Optional[Union[TextHistory, List[TextHistory]]]=None, \n env_seed: Union[Optional[int], Optional[List[Optional[int]]]]=None, \n env_options: Union[Optional[Dict], Optional[List[Optional[int]]]]=None, \n bsize: int=1, \n npad: int=0,\n) -> List[List[InteractionTransition]]:\ndef text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True, \n) -> Tuple[List[List[InteractionTransition]], Dict[str, Any]]:\n def __init__(\n self, \n initial_str: str, \n postproc_print_f: Optional[Callable[[str], str]]=None, \n postproc_action_f: Optional[Callable[[str], str]]=None, \n ):\n def act(self, text_history: TextHistory) -> TextHistory:\n def __post_init__(self):\n def from_text_history(\n cls, \n text_history: TextHistory, \n tokenizer: PreTrainedTokenizer, \n 
token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenHistory:\n def __post_init__(self):\n def from_text_trajectory(\n cls, \n text_trajectory: TextTrajectory, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectory:\n def __post_init__(self):\n def to_list(self) -> List[TokenTrajectory]:\n def from_text_trajectory_chain(\n cls, \n text_trajectory_chain: TextTrajectoryChain, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectoryChain:" }, { "identifier": "MCData", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCData(NamedTuple):\n input_ids: np.ndarray # [t]\n should_take_action: np.ndarray # [t-1]\n returns: np.ndarray # [t-1]\n\n @staticmethod\n def block(\n data: List[MCData], \n blocking_strategy: BlockingStrategy, \n tokenizer: PreTrainedTokenizerBase, \n ) -> Dict[str, np.ndarray]:\n return dict(\n input_ids=block_sequences(\n list(map(lambda x: x.input_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ), \n should_take_action=block_sequences(\n list(map(lambda x: x.should_take_action, data)), \n False, \n dtype=np.bool_, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n returns=block_sequences(\n list(map(lambda x: x.returns, data)), \n 0.0, \n dtype=np.float32, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n )\n \n @classmethod\n def from_token_trajectory_chain(\n cls, \n token_trajectory_chain: TokenTrajectoryChain, \n gamma: float, \n ):\n filtered_rewards_chain = []\n should_take_action_chain = []\n for token_trajectory in token_trajectory_chain.to_list():\n should_take_action = token_trajectory.is_action[1:]\n rewards = token_trajectory.reward[1:]\n filtered_rewards = rewards[should_take_action]\n filtered_rewards_chain.append(filtered_rewards)\n should_take_action_chain.append(should_take_action)\n filtered_rewards_chain = np.concatenate(filtered_rewards_chain, axis=0)\n should_take_action_chain = np.concatenate(should_take_action_chain, axis=0)\n \n rtgs_sequence = get_rtg(filtered_rewards_chain, gamma=gamma)\n \n should_take_action = token_trajectory_chain.token_trajectory.is_action[1:]\n returns = np.zeros_like(should_take_action, dtype=np.float32)\n returns[should_take_action] = rtgs_sequence[:should_take_action.sum()]\n return cls(\n input_ids=token_trajectory_chain.token_trajectory.tokens, \n should_take_action=should_take_action, \n returns=returns, \n )" }, { "identifier": "MCDataset", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCDataset(Dataset):\n def __init__(\n self, \n input_ids: np.ndarray, # [b, t]\n should_take_action: np.ndarray, # [b, t-1]\n returns: np.ndarray, # [b, t-1]\n ):\n assert input_ids.shape[1] == (should_take_action.shape[1]+1)\n assert input_ids.shape[1] == (returns.shape[1]+1)\n\n assert input_ids.shape[0] == should_take_action.shape[0]\n assert input_ids.shape[0] == returns.shape[0]\n\n self.input_ids = input_ids\n self.should_take_action = should_take_action\n self.returns = returns\n \n def __getitem__(self, index):\n return {\n 'input_ids': jnp.asarray(self.input_ids[index], dtype=jnp.int32), \n 'should_take_action': jnp.asarray(self.should_take_action[index], dtype=jnp.bool_), \n 'returns': jnp.asarray(self.returns[index], dtype=jnp.float32), \n }\n \n def __len__(self):\n return 
self.input_ids.shape[0]\n \n @classmethod\n def from_mc_data_list(\n cls, \n mc_data_list: List[MCData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> MCDataset:\n \n data = MCData.block(mc_data_list, blocking_strategy, tokenizer)\n\n return cls(**data)" }, { "identifier": "GPT2ValuePolicy", "path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py", "snippet": "class GPT2ValuePolicy(ValueRLPolicy):\n def __init__(\n self, \n inference: ValueRLInference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n self.inference = inference\n self.prng_key = prng_key\n self.generation_config = generation_config\n self.blocking_strategy = blocking_strategy\n self.in_str_process = in_str_process\n self.out_str_process = out_str_process\n self.input_token_process = input_token_process\n self.target_token_process = target_token_process\n if self.in_str_process is None:\n self.in_str_process = lambda x: x\n if self.out_str_process is None:\n self.out_str_process = lambda x: x\n self.trace = trace\n \n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n if done is None:\n done = [False]*len(text_history)\n # force eos_token for done sequences\n eos_token = self.inference.tokenizer.eos_token\n if self.generation_config is not None and self.generation_config.eos_token_id is not None:\n eos_token = self.inference.tokenizer.decode(self.generation_config.eos_token_id)\n if eos_token is None:\n eos_token = self.inference.tokenizer.pad_token\n if eos_token is None:\n eos_token = ''\n \n raw_input_strs = [\n eos_token if d else self.in_str_process(text_history_to_str(item)) \\\n for item, d in zip(text_history, done)\n ]\n\n new_key = None\n if self.prng_key is not None:\n self.prng_key, new_key = jax.random.split(self.prng_key)\n model_outputs = self.inference.generate_from_str(\n input_strs=raw_input_strs, \n prng_key=new_key, \n blocking_strategy=self.blocking_strategy, \n generation_config=self.generation_config, \n input_token_process=self.input_token_process, \n target_token_process=self.target_token_process, \n trace=self.trace, \n )\n\n raw_output_strs = model_outputs.output_strs\n output_strs = [\n \"\" if d else self.out_str_process(strip_prompt_from_completion(raw_input_str, raw_output_str)) \\\n for raw_input_str, raw_output_str, d in zip(raw_input_strs, raw_output_strs, done)\n ]\n\n return [\n None if d else text_history_item+(Text(output_str, True),) \\\n for text_history_item, output_str, d in zip(text_history, output_strs, done)\n ]\n \n def set_params(self, policy_params: PyTree) -> None:\n pi_beta_params, base_params, \\\n q1_head_params, q2_head_params = policy_params\n self.inference = self.inference.replace(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n )" }, { "identifier": "load_train_state_from_config", "path": "LLM_RL/heads/mlp_head.py", "snippet": "def load_train_state_from_config(\n model_config: MLPHeadConfig, \n model_dtype: Union[str, 
jnp.dtype], \n optim_getter: Callable[[PyTree], optax.GradientTransformation], \n mesh: Mesh, # should be shape (dp, mp)\n prng_key: jax.random.PRNGKeyArray, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[TrainState, MLPHead]:\n \n model = MLPHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # shard params\n params = freeze(shard_params_from_config(model, prng_key, params_dtype=params_dtype))\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n # shard train_state\n train_state = shard_train_state_from_params(model, params, optim_getter(params))\n\n return train_state, model" }, { "identifier": "MLPHeadConfig", "path": "LLM_RL/heads/mlp_head.py", "snippet": "class MLPHeadConfig(HeadConfig):\n def __init__(\n self, \n input_dim: int, \n hidden_dim: int, \n output_dim: int, \n use_bias: bool=True, \n unpadded_output_dim: Optional[int]=None, \n layer1_initializer_range: Optional[int]=None, \n layer1_bias_init: Optional[float]=None, \n layer2_initializer_range: Optional[int]=None, \n layer2_bias_init: Optional[float]=None, \n mesh: Optional[jax.sharding.Mesh]=None, \n ) -> None:\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n self.use_bias = use_bias\n self.layer1_initializer_range = layer1_initializer_range\n self.layer1_bias_init = layer1_bias_init\n self.layer2_initializer_range = layer2_initializer_range\n self.layer2_bias_init = layer2_bias_init\n self.mesh = mesh\n self.unpadded_output_dim = unpadded_output_dim\n if self.unpadded_output_dim is None:\n self.unpadded_output_dim = self.output_dim\n super().__init__()\n \n @staticmethod\n def get_partition_rules():\n return [\n (re.escape(\"['dense1']['kernel']\"), PS(\"fsdp\", \"mp\")), \n (re.escape(\"['dense1']['bias']\"), PS(\"mp\")), \n (re.escape(\"['dense2']['kernel']\"), PS(\"mp\", \"fsdp\")), \n (re.escape(\"['dense2']['bias']\"), PS()), \n ]\n\n def to_dict(self) -> Dict[str, Any]:\n if self.mesh is None:\n return super().to_dict()\n else:\n new_conf = MLPHeadConfig(**self.__dict__)\n new_conf.mesh = None\n return new_conf.to_dict()" }, { "identifier": "GPT2MCTrain", "path": "LLM_RL/algorithms/mc_returns/gpt2/interface.py", "snippet": "class GPT2MCTrain(MCTrain):\n @classmethod\n def load_train(\n cls, \n base_train_state: TrainState, \n q_head_train_state: TrainState, \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n detach_q: bool, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n base_train_state_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_train_state)\n q_head_train_state_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q_head_train_state)\n\n @partial(\n pjit, \n donate_argnums=(0, 1), \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q_head_train_state_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), 
base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q_head_train_state_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _step(\n base_train_state: TrainState, \n q_head_train_state: TrainState, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n train: bool=True, \n ) -> Tuple[TrainState, Optional[PyTree], TrainState, TrainState, TrainState, PyTree, PyTree, jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n returns = with_named_sharding_constraint(returns, mesh, PS(('dp', 'fsdp'), None))\n\n # define loss function\n\n def grad_loss(base_params: PyTree, q_head_params: PyTree, prng_key: jax.random.PRNGKeyArray):\n \n # get base hidden states\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q_head_output = q_head_model.apply(\n {'params': q_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n # stop gradients\n if detach_q:\n q_head_output = jax.lax.stop_gradient(q_head_output)\n\n q = jnp.take_along_axis(q_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q_logits = q_head_output[:, :-1, :].astype(jnp.float32)\n\n loss, info = loss_fn(\n q, \n q_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n returns, \n )\n return loss, info\n\n # take loss\n (loss, info), (base_grads, q_head_grads) = jax.value_and_grad(grad_loss, has_aux=True, argnums=(0, 1))(\n base_train_state.params, \n q_head_train_state.params, \n prng_key, \n )\n # assert shard gradients\n base_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n base_grads, \n base_train_state_partition_spec.params, \n )\n q_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n q_head_grads, \n q_head_train_state_partition_spec.params, \n )\n # update params and optim state\n base_train_state = base_train_state.apply_gradients(grads=base_grads)\n q_head_train_state = q_head_train_state.apply_gradients(grads=q_head_grads)\n\n return base_train_state, q_head_train_state, loss, info\n\n return cls(\n base_train_state=base_train_state, \n q_head_train_state=q_head_train_state, \n base_model=base_model, \n q_head_model=q_head_model, \n tokenizer=tokenizer, \n _step=_step, \n )" }, { "identifier": "GPT2MCInference", "path": "LLM_RL/algorithms/mc_returns/gpt2/interface.py", "snippet": "class GPT2MCInference(MCInference):\n @classmethod\n def load_inference(\n cls, \n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q_head_params: PyTree, 
\n pi_beta_model: Optional[FlaxPreTrainedModel], \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n beta: float=0.0, \n dp_shard_logits: bool=True, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n\n value_inference = GPT2ValueRLInference.load_inference(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q_head_params, \n q2_head_params=None, \n v_head_params=None, \n pi_beta_model=pi_beta_model, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=None, \n tokenizer=tokenizer, \n beta=beta, \n dp_shard_logits=dp_shard_logits, \n )\n\n base_params_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_params)\n q_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q_head_params)\n\n @partial(\n pjit, \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _eval_loss(\n base_params: TrainState, \n q_head_params: TrainState, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n train: bool=True, \n ):\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n returns = with_named_sharding_constraint(returns, mesh, PS(('dp', 'fsdp'), None))\n\n # get base hidden states\n \n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q_head_output = q_head_model.apply(\n {'params': q_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n q = jnp.take_along_axis(q_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q_logits = q_head_output[:, :-1, :].astype(jnp.float32)\n\n loss, info = loss_fn(\n q, \n q_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n returns, \n )\n\n return loss, info\n \n return cls(\n pi_beta_params=value_inference.pi_beta_params, \n base_params=value_inference.base_params, \n q1_head_params=value_inference.q1_head_params, \n q2_head_params=value_inference.q2_head_params, \n v_head_params=value_inference.v_head_params, \n pi_beta_model=value_inference.pi_beta_model, \n 
base_model=value_inference.base_model, \n q_head_model=value_inference.q_head_model, \n v_head_model=value_inference.v_head_model, \n tokenizer=value_inference.tokenizer, \n _generate=value_inference._generate, \n _forward=value_inference._forward, \n _eval_loss=_eval_loss, \n )" }, { "identifier": "setup_maze_env", "path": "llm_rl_scripts/maze/env/maze_utils.py", "snippet": "def setup_maze_env(maze_name, describe_function, reward_function=None, last_k=1, max_steps=100):\n # setup environment\n if maze_name == 'umaze':\n maze = maze2d_umaze()\n valid_goals = np.array([[3, 3]])\n start_position = (3, 1)\n elif maze_name == \"double_t_maze\":\n maze = double_t_maze()\n valid_goals = np.array([[8, 6]])\n start_position = (1, 1)\n else:\n raise ValueError(f'unknown maze name: {maze_name}')\n \n # valid_goals = np.where(maze == 0)\n # valid_goals = np.array(list(zip(valid_goals[0], valid_goals[1])), dtype=np.int32)\n if describe_function == \"describe_observation\":\n describe_function = describe_observation\n elif describe_function == \"describe_observation_give_position\":\n describe_function = describe_observation_give_position\n elif describe_function == \"describe_observation_only_walls\":\n describe_function = describe_observation_only_walls\n else:\n raise ValueError(f'unknown describe function: {describe_function}')\n \n if reward_function is None or reward_function == \"standard_reward\":\n reward_function = standard_reward\n elif reward_function == \"illegal_penalty_reward\":\n reward_function = illegal_penalty_reward\n elif reward_function == \"illegal_penalty_diff_scale\":\n reward_function = illegal_penalty_diff_scale\n else:\n raise ValueError(f'unknown reward function: {reward_function}')\n \n env = MazeEnv(\n maze=maze, \n valid_goals=valid_goals, \n actions=manhatten_actions, \n max_steps=max_steps, \n display_initial_position=True,\n describe_function=describe_function,\n reward_function=reward_function,\n last_k=last_k,\n )\n return env" }, { "identifier": "pick_start_position", "path": "llm_rl_scripts/maze/env/maze_utils.py", "snippet": "def pick_start_position(maze_name):\n if maze_name == 'umaze':\n return (3, 1)\n elif maze_name == \"double_t_maze\":\n return (1, 1)\n else:\n raise ValueError(f'unknown maze name: {maze_name}')" }, { "identifier": "double_t_maze_optimal_directions", "path": "llm_rl_scripts/maze/env/mazes.py", "snippet": "def double_t_maze_optimal_directions():\n dct = {\n (1, 1): \"move right\\n\",\n (1, 2): \"move right\\n\",\n (1, 3): \"move down\\n\",\n (1, 4): \"move left\\n\",\n (1, 5): \"move left\\n\",\n (1, 7): \"move right\\n\",\n (1, 8): \"move right\\n\",\n (1, 9): \"move down\\n\",\n (1, 10): \"move left\\n\",\n (1, 11): \"move left\\n\",\n (2, 3): \"move down\\n\",\n (3, 3): \"move down\\n\",\n (4, 3): \"move down\\n\",\n (5, 3): \"move right\\n\",\n (5, 4): \"move right\\n\",\n (5, 5): \"move right\\n\",\n (5, 6): \"move down\\n\",\n (6, 6): \"move down\\n\",\n (7, 6): \"move down\\n\",\n (5, 7): \"move left\\n\",\n (5, 8): \"move left\\n\",\n (5, 9): \"move left\\n\",\n (4, 9): \"move down\\n\",\n (3, 9): \"move down\\n\",\n (2, 9): \"move down\\n\",\n }\n return dct" }, { "identifier": "double_t_maze", "path": "llm_rl_scripts/maze/env/mazes.py", "snippet": "def double_t_maze():\n x = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], \n [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], \n [1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1], \n [1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1], \n [1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1], \n [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 
1, 1, 1], \n [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1], \n [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1], \n [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1], \n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)\n \n return x" }, { "identifier": "describe_observation_give_position", "path": "llm_rl_scripts/maze/env/env.py", "snippet": "def describe_observation_give_position(maze:np.ndarray,\n position: Tuple[int, int],\n goal_position: Tuple[int, int],\n initial_position: Tuple[int, int]=None,\n move_history: List[str]=None,\n ) -> str:\n goal_description = f\"The goal is at position {' '.join(str(goal_position[0]))}, {' '.join(str(goal_position[1]))}.\"\n curr_position_description = f\"Your current position is at position {' '.join(str(position[0]))}, {' '.join(str(position[1]))}.\"\n delta_descriptions = {\"to your right\": (0, 1), \"to your left\": (0, -1), \"above you\": (-1, 0), \"below you\": (1, 0)} \n\n walls = []\n for k, (dy, dx) in delta_descriptions.items():\n if maze[position[0]+dy, position[1]+dx] == 1:\n walls.append(k)\n \n wall_description = describe_objects(\"wall\", walls)\n \n return f\"{goal_description} {curr_position_description} {wall_description}\\n\"" }, { "identifier": "maze_proposal_function", "path": "llm_rl_scripts/maze/env/env.py", "snippet": "def maze_proposal_function(text_history: TextHistory) -> List[TextHistory]:\n return [text_history+(Text(action, True),) for action in manhatten_actions.keys()]" }, { "identifier": "ReRankerPolicy", "path": "LLM_RL/algorithms/ppo/reranker_policy.py", "snippet": "class ReRankerPolicy(TextPolicy):\n \n def __init__(self, proposal_fn: Callable[[TextHistory], List[TextHistory]], score_fn: Callable[[List[TextHistory]], List[float]]):\n self.proposal_fn = proposal_fn\n self.score_fn = score_fn\n\n def act(self, text_history: TextHistory) -> TextHistory:\n proposals = self.proposal_fn(text_history)\n scores = self.score_fn(proposals)\n\n return proposals[np.argmax(np.asarray(scores, dtype=np.float32)).item()]" }, { "identifier": "ReRankerSamplePolicy", "path": "LLM_RL/algorithms/ppo/reranker_policy.py", "snippet": "class ReRankerSamplePolicy(TextPolicy):\n \n def __init__(self, proposal_fn, score_fn: Callable[[List[TextHistory]], List[float]]):\n self.proposal_fn = proposal_fn\n self.score_fn = score_fn\n \n def act(self, text_history: TextHistory) -> TextHistory:\n proposals = self.proposal_fn(text_history)\n scores = np.asarray(self.score_fn(proposals), dtype=np.float32)\n # sample from scores\n scores = np.exp(scores) / np.exp(scores).sum()\n selected = np.random.choice(len(scores), p=scores)\n # # zip proposals and scores together\n # proposals_and_scores = list(zip(proposals, scores))\n # print(proposals_and_scores)\n return proposals[selected]" }, { "identifier": "mc_loss", "path": "LLM_RL/algorithms/mc_returns/base_interface.py", "snippet": "def mc_loss(\n q: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q_logits: jax.Array, # [batch, time-1, vocab] output is masked; shift x[:-1]\n token_ids: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n attention_mask: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n should_take_action: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n returns: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n *, \n cql_weight: Union[float, jax.Array], \n) -> Tuple[jnp.ndarray, Any]:\n # should be an action in the batch\n mask = should_take_action.astype(jnp.float32) * attention_mask\n n = mask.sum()\n q_query_indicators = 
get_query_indicators(should_take_action.reshape(-1))\n \n # extract selected values\n qsa_selected = (q_query_indicators * q.reshape(-1)).sum(axis=1)\n returns_selected = (q_query_indicators * returns.reshape(-1)).sum(axis=1)\n\n # get masks for selected values\n a_mask = (q_query_indicators.sum(axis=1) > 0).astype(jnp.float32)\n\n # compute q loss\n q_loss = (optax.l2_loss(qsa_selected, jax.lax.stop_gradient(returns_selected)) * a_mask).sum() / n\n\n # compute cql loss on both q heads\n q_cql_loss = optax.softmax_cross_entropy_with_integer_labels(q_logits, token_ids)\n q_cql_loss = (mask * q_cql_loss).sum() / n\n \n loss = q_loss + cql_weight * q_cql_loss\n\n logs = dict(\n losses=dict(\n total_loss=loss, \n q_loss=q_loss, \n q_cql_loss=q_cql_loss, \n ), \n q=get_tensor_stats(qsa_selected, mask=a_mask, n=n), \n returns=get_tensor_stats(returns_selected, mask=a_mask, n=n), \n )\n\n return loss, logs" }, { "identifier": "train_loop", "path": "LLM_RL/algorithms/mc_returns/train.py", "snippet": "def train_loop(\n trainer: MCTrain, \n inference: Union[ValueRLInference, MCInference], \n evaluator: Optional[Callable[[Inference], Tuple[float, Dict[str, Any]]]], \n dataset: Union[Seq2SeqDataset, Seq2SeqIterableDataset], \n prng_key: KeyArray, \n save_dir: Optional[str], \n epochs: int, \n max_steps: Optional[int], \n bsize: int, \n log_every: int, \n eval_every_steps: Optional[int], \n eval_every_epochs: Optional[int], \n eval_at_beginning: bool, \n eval_at_end: bool, \n save_every_steps: Optional[int], \n save_every_epochs: Optional[int], \n save_at_beginning: bool, \n save_at_end: bool, \n save_best: bool, \n max_checkpoints: Optional[int], \n save_train_state: bool, \n save_dtype: jnp.dtype, \n use_wandb: bool, \n wandb_project: Optional[str], \n wandb_run_name: Optional[str], \n wandb_config: Optional[Dict[str, Any]], \n is_main_process: Optional[bool]=None, \n **loop_state: Dict[Hashable, Any], \n) -> Tuple[Train, Inference]:\n assert (not use_wandb) or (use_wandb and wandb_project is not None)\n if is_main_process is None:\n is_main_process = jax.process_index() == 0\n \n # initalize wandb\n wandb_id = loop_state.get('wandb_id', None)\n if use_wandb and is_main_process:\n if wandb_id is None:\n wandb_id = wandb.util.generate_id()\n wandb.init(\n project=wandb_project, \n id=wandb_id, \n name=wandb_run_name, \n config=wandb_config, \n reinit=True, \n resume=\"allow\", \n )\n\n # initalize training loop state\n train_logs = []\n best_perf = loop_state.get('best_perf', float('inf'))\n saved_checkpoints = loop_state.get('saved_checkpoints', deque([]))\n step = 0\n steps_per_epoch = len(dataset) // bsize if isinstance(dataset, Dataset) else None\n if 'steps_per_epoch' in loop_state:\n assert steps_per_epoch == loop_state['steps_per_epoch'], 'loop_state steps_per_epoch does not match dataset steps_per_epoch'\n epoch = -1\n\n def _save(\n name: str, \n add_to_queue: bool, \n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal saved_checkpoints\n print(f'saving checkpoint {name} ...')\n # conditionally delete old checkpoints\n if add_to_queue and is_main_process:\n if (max_checkpoints is not None) and (len(saved_checkpoints) >= max_checkpoints):\n delete(saved_checkpoints.popleft(), recursive=True)\n curr_save_dir = os.path.join(save_dir, name)\n if is_main_process:\n create_path(curr_save_dir)\n dump_state(\n base_model=trainer.base_model, \n q_head_model=trainer.q_head_model, \n base_train_state=trainer.base_train_state, \n q_head_train_state=trainer.q_head_train_state, \n save_dir=curr_save_dir, 
\n save_train_state=save_train_state, \n enable_save=is_main_process, \n save_dtype=save_dtype, \n **loop_state, \n )\n if add_to_queue and is_main_process:\n saved_checkpoints.append(curr_save_dir)\n print('saved.')\n \n def _inference_update():\n nonlocal inference\n inference = inference.replace(\n base_params=trainer.base_train_state.params, \n q1_head_params=trainer.q_head_train_state.params, \n )\n \n def _eval(\n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal best_perf\n # get eval logs\n _inference_update()\n eval_perf, eval_logs = evaluator(inference)\n\n # publish eval logs\n eval_logs = pull_logs(label_logs(eval_logs, 'eval', {'step': step+1, 'epoch': epoch}))\n log(eval_logs, use_wandb and is_main_process)\n\n # conditionally save best model and optimizer state\n if save_dir is not None and save_best and eval_perf < best_perf:\n print('new best model!')\n best_perf = eval_perf\n _save(\n name='best', \n add_to_queue=False, \n **{**loop_state, 'best_perf': best_perf}, \n )\n \n # begin evaluation\n if evaluator is not None and eval_at_beginning:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save initial checkpoint\n if save_dir is not None and save_at_beginning:\n _save(\n name='initial', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # begin training loop\n for epoch in tqdm(range(epochs)):\n prng_key, new_prng = jax.random.split(prng_key)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for batch in tqdm(d, total=steps_per_epoch):\n \n # step model and get training logs\n prng_key, new_prng = jax.random.split(prng_key)\n if 'step' in loop_state and step < loop_state['step']:\n step += 1\n continue\n trainer, _, info = trainer.step(\n **batch, \n prng_key=new_prng, \n train=True, \n )\n train_logs.append(info)\n \n # publish training logs and clear logs\n if (step + 1) % log_every == 0:\n logs = combine_logs(train_logs)\n logs = pull_logs(label_logs(logs, 'train', {'step': step+1, 'epoch': epoch}))\n log(logs, use_wandb and is_main_process)\n train_logs = []\n \n # begin evaluation\n if evaluator is not None and eval_every_steps is not None and (step + 1) % eval_every_steps == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_steps is not None and (step + 1) % save_every_steps == 0:\n _save(\n name=f'step_{step+1}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n step += 1\n\n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_every_epochs is not None and (epoch + 1) % eval_every_epochs == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_epochs is not None and (epoch + 1) % 
save_every_epochs == 0:\n _save(\n name=f'epoch_{epoch}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_at_end:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save final checkpoint\n if save_dir is not None and save_at_end:\n _save(\n name='last', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n # stop wandb\n if use_wandb and is_main_process:\n wandb.finish()\n _inference_update()\n return trainer, inference" }, { "identifier": "MCData", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCData(NamedTuple):\n input_ids: np.ndarray # [t]\n should_take_action: np.ndarray # [t-1]\n returns: np.ndarray # [t-1]\n\n @staticmethod\n def block(\n data: List[MCData], \n blocking_strategy: BlockingStrategy, \n tokenizer: PreTrainedTokenizerBase, \n ) -> Dict[str, np.ndarray]:\n return dict(\n input_ids=block_sequences(\n list(map(lambda x: x.input_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ), \n should_take_action=block_sequences(\n list(map(lambda x: x.should_take_action, data)), \n False, \n dtype=np.bool_, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n returns=block_sequences(\n list(map(lambda x: x.returns, data)), \n 0.0, \n dtype=np.float32, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n )\n \n @classmethod\n def from_token_trajectory_chain(\n cls, \n token_trajectory_chain: TokenTrajectoryChain, \n gamma: float, \n ):\n filtered_rewards_chain = []\n should_take_action_chain = []\n for token_trajectory in token_trajectory_chain.to_list():\n should_take_action = token_trajectory.is_action[1:]\n rewards = token_trajectory.reward[1:]\n filtered_rewards = rewards[should_take_action]\n filtered_rewards_chain.append(filtered_rewards)\n should_take_action_chain.append(should_take_action)\n filtered_rewards_chain = np.concatenate(filtered_rewards_chain, axis=0)\n should_take_action_chain = np.concatenate(should_take_action_chain, axis=0)\n \n rtgs_sequence = get_rtg(filtered_rewards_chain, gamma=gamma)\n \n should_take_action = token_trajectory_chain.token_trajectory.is_action[1:]\n returns = np.zeros_like(should_take_action, dtype=np.float32)\n returns[should_take_action] = rtgs_sequence[:should_take_action.sum()]\n return cls(\n input_ids=token_trajectory_chain.token_trajectory.tokens, \n should_take_action=should_take_action, \n returns=returns, \n )" }, { "identifier": "MCDataset", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCDataset(Dataset):\n def __init__(\n self, \n input_ids: np.ndarray, # [b, t]\n should_take_action: np.ndarray, # [b, t-1]\n returns: np.ndarray, # [b, t-1]\n ):\n assert input_ids.shape[1] == (should_take_action.shape[1]+1)\n assert input_ids.shape[1] == (returns.shape[1]+1)\n\n assert input_ids.shape[0] == should_take_action.shape[0]\n assert input_ids.shape[0] == 
returns.shape[0]\n\n self.input_ids = input_ids\n self.should_take_action = should_take_action\n self.returns = returns\n \n def __getitem__(self, index):\n return {\n 'input_ids': jnp.asarray(self.input_ids[index], dtype=jnp.int32), \n 'should_take_action': jnp.asarray(self.should_take_action[index], dtype=jnp.bool_), \n 'returns': jnp.asarray(self.returns[index], dtype=jnp.float32), \n }\n \n def __len__(self):\n return self.input_ids.shape[0]\n \n @classmethod\n def from_mc_data_list(\n cls, \n mc_data_list: List[MCData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> MCDataset:\n \n data = MCData.block(mc_data_list, blocking_strategy, tokenizer)\n\n return cls(**data)" }, { "identifier": "build_mc_score_fn", "path": "LLM_RL/algorithms/mc_returns/score_fn.py", "snippet": "def build_mc_score_fn(\n inference: MCInference, \n pi_beta_inference: Optional[GPT2Inference], \n tokenizer: PreTrainedTokenizer, \n max_length: int, \n value_weight: float, \n logit_weight: Optional[float], \n bsize: int, \n):\n assert (pi_beta_inference is None and logit_weight is None) or (pi_beta_inference is not None and logit_weight is not None)\n \n def score_fn(text_histories: List[TextHistory], done:Optional[List]=None) -> List[float]:\n assert all([text_history[-1].is_action for text_history in text_histories])\n \n prev_token_histories = []\n token_histories = []\n for text_history in text_histories:\n prev_token_histories.append(TokenHistory.from_text_history(text_history[:-1], tokenizer))\n token_histories.append(TokenHistory.from_text_history(text_history, tokenizer))\n \n # truncate to end and pad tokens\n tokens = np.stack([np.concatenate((token_history.tokens[-max_length:], np.full((max_length-min(token_history.tokens.shape[0], max_length),), tokenizer.pad_token_id)), axis=0) for token_history in token_histories], axis=0)\n tokens = jnp.asarray(tokens, dtype=jnp.int32)\n \n advantages = []\n \n for i in range(0, len(text_histories), bsize):\n batch = tokens[i:i+bsize, :]\n values = inference.forward(batch)\n\n prefix_len = jnp.asarray([prev_token_histories[i+x].tokens.shape[0] for x in range(batch.shape[0])], dtype=jnp.int32)\n attention_mask = (batch != tokenizer.pad_token_id).astype(np.float32)\n\n qs = values.q1\n qsa = jnp.take_along_axis(qs[:, :-1], batch[:, 1:][..., None], axis=2).squeeze(2)\n action_advs = jnp.empty(prefix_len.shape, dtype=jnp.float32)\n for x in range(len(prefix_len)):\n # embed()\n action_advs = action_advs.at[x].set(value_weight * ((qsa[x]) * attention_mask[x, 1:])[(prefix_len[x]-1):].sum(axis=0))\n\n if logit_weight is not None:\n logprobs = jax.nn.log_softmax(pi_beta_inference.get_logits_from_tokens(batch), axis=-1)\n action_logits = jnp.take_along_axis(logprobs[:, :-1], batch[:, 1:][..., None], axis=2).squeeze(2)\n for x in range(len(prefix_len)):\n action_advs = action_advs.at[x].add(logit_weight * (action_logits[x] * attention_mask[x, 1:])[(prefix_len[x]-1):].sum(axis=0))\n\n advantages.extend(jax.device_get(action_advs).tolist())\n \n return advantages\n\n return score_fn" } ]
from typing import Optional, Dict, Any from JaxSeq.bucket_manager import open_with_bucket as open from JaxSeq.utils import convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain, text_history_to_str from LLM_RL.algorithms.mc_returns.data import MCData, MCDataset from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.mlp_head import MLPHeadConfig from LLM_RL.algorithms.mc_returns.gpt2.interface import GPT2MCTrain, GPT2MCInference from functools import partial from JaxSeq.logs import log, pull_logs from transformers import GPT2TokenizerFast from IPython import embed from llm_rl_scripts.maze.env.maze_utils import setup_maze_env, pick_start_position from llm_rl_scripts.maze.env.mazes import double_t_maze_optimal_directions, double_t_maze from llm_rl_scripts.maze.env.env import describe_observation_give_position, maze_proposal_function from LLM_RL.algorithms.ppo.reranker_policy import ReRankerPolicy, ReRankerSamplePolicy from JaxSeq.shard_model import copy_sharded_pytree from LLM_RL.algorithms.mc_returns.base_interface import mc_loss from LLM_RL.algorithms.mc_returns.train import train_loop from LLM_RL.algorithms.mc_returns.data import MCData, MCDataset from LLM_RL.algorithms.mc_returns.score_fn import build_mc_score_fn import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json import random
14175
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_give_position_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.95, cql_weight: float=0.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=80, log_every: int=256, eval_every_steps: Optional[int]=10000, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=True, eval_at_end: bool=True, save_every_steps: Optional[int]=100000, save_every_epochs: Optional[int]=None, save_at_beginning: bool=True, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=False, ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_give_position_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.95, cql_weight: float=0.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=80, log_every: int=256, eval_every_steps: Optional[int]=10000, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=True, eval_at_end: bool=True, save_every_steps: Optional[int]=100000, save_every_epochs: Optional[int]=None, save_at_beginning: bool=True, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=False, ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element
last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)],
0
2023-11-21 00:16:42+00:00
16k
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Kernerl size of conv layers.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\r\n \"\"\"Construct an ConvolutionModule object.\r\n \"\"\"\r\n super(ConvolutionModule, self).__init__()\r\n # kernerl_size should be a odd number for 'SAME' padding\r\n assert (kernel_size - 1) % 2 == 0\r\n\r\n self.pointwise_conv1 = nn.Conv1d(\r\n channels,\r\n 2 * channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.depthwise_conv = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n groups=channels,\r\n bias=bias,\r\n )\r\n self.norm = nn.BatchNorm1d(channels)\r\n self.pointwise_conv2 = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Compute convolution module.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, channels).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, channels).\r\n\r\n \"\"\"\r\n # exchange the temporal dimension and the feature dimension\r\n x = x.transpose(1, 2)\r\n\r\n # GLU mechanism\r\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\r\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\r\n\r\n # 1D Depthwise Conv\r\n x = self.depthwise_conv(x)\r\n x = self.activation(self.norm(x))\r\n\r\n x = self.pointwise_conv2(x)\r\n\r\n return x.transpose(1, 2)\r" }, { "identifier": "EncoderLayer", "path": "src/clap_module/conformer/encoder_layer.py", "snippet": "class EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer module.\r\n\r\n Args:\r\n size (int): Input dimension.\r\n self_attn (torch.nn.Module): Self-attention module instance.\r\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance\r\n can be used as the argument.\r\n feed_forward (torch.nn.Module): Feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n conv_module (torch.nn.Module): Convolution module instance.\r\n `ConvlutionModule` instance can be used as the argument.\r\n dropout_rate (float): Dropout rate.\r\n normalize_before (bool): Whether to use layer_norm before the first block.\r\n concat_after (bool): Whether to concat attention layer's input and output.\r\n if True, additional linear will be applied.\r\n i.e. x -> x + linear(concat(x, att(x)))\r\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\r\n stochastic_depth_rate (float): Proability to skip this layer.\r\n During training, the layer may skip residual computation and return input\r\n as-is with given probability.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n size,\r\n self_attn,\r\n feed_forward,\r\n feed_forward_macaron,\r\n conv_module,\r\n dropout_rate,\r\n normalize_before=True,\r\n concat_after=False,\r\n stochastic_depth_rate=0.0,\r\n ):\r\n \"\"\"Construct an EncoderLayer object.\"\"\"\r\n super(EncoderLayer, self).__init__()\r\n self.self_attn = self_attn\r\n self.feed_forward = feed_forward\r\n self.feed_forward_macaron = feed_forward_macaron\r\n self.conv_module = conv_module\r\n self.norm_ff = LayerNorm(size) # for the FNN module\r\n self.norm_mha = LayerNorm(size) # for the MHA module\r\n if feed_forward_macaron is not None:\r\n self.norm_ff_macaron = LayerNorm(size)\r\n self.ff_scale = 0.5\r\n else:\r\n self.ff_scale = 1.0\r\n if self.conv_module is not None:\r\n self.norm_conv = LayerNorm(size) # for the CNN module\r\n self.norm_final = LayerNorm(size) # for the final output of the block\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.size = size\r\n self.normalize_before = normalize_before\r\n self.concat_after = concat_after\r\n if self.concat_after:\r\n self.concat_linear = nn.Linear(size + size, size)\r\n self.stochastic_depth_rate = stochastic_depth_rate\r\n\r\n def forward(self, x_input, mask, cache=None):\r\n \"\"\"Compute encoded features.\r\n\r\n Args:\r\n x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.\r\n - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].\r\n - w/o pos emb: Tensor (#batch, time, size).\r\n mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).\r\n cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, size).\r\n torch.Tensor: Mask tensor (#batch, 1, time).\r\n\r\n \"\"\"\r\n if isinstance(x_input, tuple):\r\n x, pos_emb = x_input[0], x_input[1]\r\n else:\r\n x, pos_emb = x_input, None\r\n\r\n skip_layer = False\r\n # with stochastic depth, residual connection `x + f(x)` becomes\r\n # `x <- x + 1 / (1 - p) * f(x)` at training time.\r\n stoch_layer_coeff = 1.0\r\n if self.training and self.stochastic_depth_rate > 0:\r\n skip_layer = torch.rand(1).item() < self.stochastic_depth_rate\r\n stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)\r\n\r\n if skip_layer:\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n return x, mask\r\n\r\n # whether to use macaron style\r\n if self.feed_forward_macaron is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward_macaron(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n\r\n # convolution module\r\n \"\"\"\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n \"\"\"\r\n\r\n # multi-headed self-attention module\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n if cache is None:\r\n x_q = x\r\n else:\r\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\r\n x_q = x[:, -1:, :]\r\n residual = residual[:, -1:, :]\r\n mask = None if 
mask is None else mask[:, -1:, :]\r\n\r\n if pos_emb is not None:\r\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\r\n else:\r\n x_att = self.self_attn(x_q, x, x, mask)\r\n\r\n if self.concat_after:\r\n x_concat = torch.cat((x, x_att), dim=-1)\r\n x = residual + stoch_layer_coeff * self.concat_linear(x_concat)\r\n else:\r\n x = residual + stoch_layer_coeff * self.dropout(x_att)\r\n if not self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n # convolution module\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n\r\n # feed forward module\r\n if self.feed_forward:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff(x)\r\n else:\r\n raise ValueError(\"not exit\")\r\n\r\n if self.conv_module is not None:\r\n x = self.norm_final(x)\r\n\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n\r\n return x, mask\r" }, { "identifier": "get_activation", "path": "src/clap_module/conformer/modules.py", "snippet": "def get_activation(act):\r\n \"\"\"Return activation function.\r\n \"\"\"\r\n # Lazy load to avoid unused import\r\n\r\n activation_funcs = {\r\n \"hardtanh\": torch.nn.Hardtanh,\r\n \"tanh\": torch.nn.Tanh,\r\n \"relu\": torch.nn.ReLU,\r\n \"selu\": torch.nn.SELU,\r\n \"swish\": Swish,\r\n }\r\n\r\n return activation_funcs[act]()\r" }, { "identifier": "VGG2L", "path": "src/clap_module/conformer/modules.py", "snippet": "class VGG2L(torch.nn.Module):\r\n \"\"\"VGG2L module for custom encoder.\r\n\r\n Args:\r\n idim: Input dimension.\r\n odim: Output dimension.\r\n pos_enc: Positional encoding class.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):\r\n \"\"\"Construct a VGG2L object.\"\"\"\r\n super().__init__()\r\n\r\n self.vgg2l = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((3, 2)),\r\n torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((2, 2)),\r\n )\r\n\r\n if pos_enc is not None:\r\n self.output = torch.nn.Sequential(\r\n torch.nn.Linear(128 * ((idim // 2) // 2), odim), pos_enc\r\n )\r\n else:\r\n self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)\r\n\r\n def forward(\r\n self, feats: torch.Tensor, feats_mask: torch.Tensor\r\n ) -> Union[\r\n Tuple[torch.Tensor, torch.Tensor],\r\n Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\r\n ]:\r\n \"\"\"Forward VGG2L bottleneck.\r\n\r\n Args:\r\n feats: Feature sequences. (B, F, D_feats)\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_output: VGG output sequences.\r\n (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))\r\n vgg_mask: Mask of VGG output sequences. 
(B, 1, sub(F))\r\n\r\n \"\"\"\r\n feats = feats.unsqueeze(1)\r\n vgg_output = self.vgg2l(feats)\r\n\r\n b, c, t, f = vgg_output.size()\r\n\r\n vgg_output = self.output(\r\n vgg_output.transpose(1, 2).contiguous().view(b, t, c * f)\r\n )\r\n\r\n if feats_mask is not None:\r\n vgg_mask = self.create_new_mask(feats_mask)\r\n else:\r\n vgg_mask = feats_mask\r\n\r\n return vgg_output, vgg_mask\r\n\r\n def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Create a subsampled mask of feature sequences.\r\n\r\n Args:\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))\r\n\r\n \"\"\"\r\n vgg1_t_len = feats_mask.size(2) - (feats_mask.size(2) % 3)\r\n vgg_mask = feats_mask[:, :, :vgg1_t_len][:, :, ::3]\r\n\r\n vgg2_t_len = vgg_mask.size(2) - (vgg_mask.size(2) % 2)\r\n vgg_mask = vgg_mask[:, :, :vgg2_t_len][:, :, ::2]\r\n\r\n return vgg_mask\r" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)\r\n\r\n if self.zero_triu:\r\n ones = torch.ones((x.size(2), x.size(3)))\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, time1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "MultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class MultiHeadedAttention(nn.Module):\r\n \"\"\"Multi-Head Attention layer.\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate):\r\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert n_feat % n_head == 0\r\n # We assume d_v always equals d_k\r\n self.d_k = n_feat // n_head\r\n self.h = n_head\r\n self.linear_q = nn.Linear(n_feat, n_feat)\r\n self.linear_k = nn.Linear(n_feat, n_feat)\r\n self.linear_v = nn.Linear(n_feat, n_feat)\r\n self.linear_out = nn.Linear(n_feat, n_feat)\r\n self.attn = None\r\n self.dropout = nn.Dropout(p=dropout_rate)\r\n\r\n def forward_qkv(self, query, key, value):\r\n \"\"\"Transform query, key and value.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n\r\n Returns:\r\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\r\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\r\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\r\n\r\n \"\"\"\r\n n_batch = query.size(0)\r\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\r\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\r\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\r\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\r\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\r\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\r\n\r\n return q, k, v\r\n\r\n def forward_attention(self, value, scores, mask):\r\n \"\"\"Compute attention context vector.\r\n\r\n Args:\r\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\r\n scores (torch.Tensor): Attention 
score (#batch, n_head, time1, time2).\r\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Transformed value (#batch, time1, d_model)\r\n weighted by the attention score (#batch, time1, time2).\r\n\r\n \"\"\"\r\n n_batch = value.size(0)\r\n if mask is not None:\r\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\r\n min_value = torch.finfo(scores.dtype).min\r\n scores = scores.masked_fill(mask, min_value)\r\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\r\n mask, 0.0\r\n ) # (batch, head, time1, time2)\r\n else:\r\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\r\n\r\n p_attn = self.dropout(self.attn)\r\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\r\n x = (\r\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\r\n ) # (batch, time1, d_model)\r\n\r\n return self.linear_out(x) # (batch, time1, d_model)\r\n\r\n def forward(self, query, key, value, mask):\r\n \"\"\"Compute scaled dot product attention.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\r\n time1 means the length of query vector.\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)[\r\n :, :, :, : x.size(-1) // 2 + 1\r\n ] # only keep the positions from 0 to time2\r\n\r\n if self.zero_triu:\r\n 
ones = torch.ones((x.size(2), x.size(3)), device=x.device)\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor\r\n (#batch, 2*time1-1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, 2*time1-1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "LegacyRelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\r\n \"\"\"Relative positional encoding module (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(\r\n d_model=d_model,\r\n dropout_rate=dropout_rate,\r\n max_len=max_len,\r\n reverse=True,\r\n )\r\n\r\n def forward(self, x):\r\n \"\"\"Compute positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n torch.Tensor: Positional embedding tensor (1, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[:, : x.size(1)]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "PositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\r\n \"\"\"Positional encoding.\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n reverse (bool): Whether to reverse the input position. Only for\r\n the class LegacyRelPositionalEncoding. 
We remove it in the current\r\n class RelPositionalEncoding.\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(PositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.reverse = reverse\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n self._register_load_state_dict_pre_hook(_pre_hook)\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n if self.pe.size(1) >= x.size(1):\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n pe = torch.zeros(x.size(1), self.d_model)\r\n if self.reverse:\r\n position = torch.arange(\r\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\r\n ).unsqueeze(1)\r\n else:\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale + self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "RelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\r\n \"\"\"Relative positional encoding module (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(RelPositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n # self.pe contains both positive and negative parts\r\n # the length of self.pe is 2 * input_len - 1\r\n if self.pe.size(1) >= x.size(1) * 2 - 1:\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n # Suppose `i` means to the position of query vecotr and `j` means the\r\n # position of key vector. 
We use position relative positions when keys\r\n # are to the left (i>j) and negative relative positions otherwise (i<j).\r\n pe_positive = torch.zeros(x.size(1), self.d_model)\r\n pe_negative = torch.zeros(x.size(1), self.d_model)\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe_positive[:, 0::2] = torch.sin(position * div_term)\r\n pe_positive[:, 1::2] = torch.cos(position * div_term)\r\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\r\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\r\n\r\n # Reserve the order of positive indices and concat both positive and\r\n # negative indices. This is used to support the shifting trick\r\n # as in https://arxiv.org/abs/1901.02860\r\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\r\n pe_negative = pe_negative[1:].unsqueeze(0)\r\n pe = torch.cat([pe_positive, pe_negative], dim=1)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[\r\n :,\r\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\r\n ]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "ScaledPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\r\n \"\"\"Scaled positional encoding module.\r\n\r\n See Sec. 3.2 https://arxiv.org/abs/1809.08895\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\r\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\r\n\r\n def reset_parameters(self):\r\n \"\"\"Reset parameters.\"\"\"\r\n self.alpha.data = torch.tensor(1.0)\r\n\r\n def forward(self, x):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x + self.alpha * self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "LayerNorm", "path": "src/clap_module/conformer/modules.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\r\n \"\"\"Layer normalization module.\r\n\r\n Args:\r\n nout (int): Output dim size.\r\n dim (int): Dimension to be normalized.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, nout, dim=-1):\r\n \"\"\"Construct an LayerNorm object.\"\"\"\r\n super(LayerNorm, self).__init__(nout, eps=1e-12)\r\n self.dim = dim\r\n\r\n def forward(self, x):\r\n \"\"\"Apply layer normalization.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor.\r\n\r\n Returns:\r\n torch.Tensor: Normalized tensor.\r\n\r\n \"\"\"\r\n if self.dim == -1:\r\n return super(LayerNorm, self).forward(x)\r\n return (\r\n super(LayerNorm, self)\r\n .forward(x.transpose(self.dim, -1))\r\n .transpose(self.dim, -1)\r\n )\r" }, { "identifier": "Conv1dLinear", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\r\n 
\"\"\"Conv1D + Linear for Transformer block.\r\n\r\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize Conv1dLinear module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(Conv1dLinear, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x))\r" }, { "identifier": "MultiLayeredConv1d", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\r\n \"\"\"Multi-layered conv1d for Transformer block.\r\n\r\n This is a module of multi-leyered conv1d designed\r\n to replace positionwise feed-forward network\r\n in Transforner block, which is introduced in\r\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\r\n\r\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\r\n https://arxiv.org/pdf/1905.09263.pdf\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize MultiLayeredConv1d module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(MultiLayeredConv1d, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Conv1d(\r\n hidden_chans,\r\n in_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)\r" }, { "identifier": "PositionwiseFeedForward", "path": "src/clap_module/conformer/modules.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\r\n \"\"\"Positionwise feed forward layer.\r\n\r\n Args:\r\n idim (int): Input dimenstion.\r\n hidden_units (int): The number of hidden units.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\r\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = torch.nn.Linear(idim, hidden_units)\r\n self.w_2 = torch.nn.Linear(hidden_units, idim)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Forward 
function.\"\"\"\r\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\r" }, { "identifier": "repeat", "path": "src/clap_module/conformer/modules.py", "snippet": "def repeat(N, fn, layer_drop_rate=0.0):\r\n \"\"\"Repeat module N times.\r\n\r\n Args:\r\n N (int): Number of repeat time.\r\n fn (Callable): Function to generate module.\r\n layer_drop_rate (float): Probability of dropping out each fn (layer).\r\n\r\n Returns:\r\n MultiSequential: Repeated model instance.\r\n\r\n \"\"\"\r\n return MultiSequential(*[fn(n) for n in range(N)], layer_drop_rate=layer_drop_rate)\r" }, { "identifier": "Conv2dSubsampling", "path": "src/clap_module/conformer/sub_sampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\r\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n odim (int): Output dimension.\r\n dropout_rate (float): Dropout rate.\r\n pos_enc (torch.nn.Module): Custom position encoding layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\r\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\r\n super(Conv2dSubsampling, self).__init__()\r\n self.conv = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(odim, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n )\r\n self.out = torch.nn.Sequential(\r\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\r\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\r\n )\r\n\r\n def forward(self, x, x_mask):\r\n \"\"\"Subsample x.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, idim).\r\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\r\n\r\n Returns:\r\n torch.Tensor: Subsampled tensor (#batch, time', odim),\r\n where time' = time // 4.\r\n torch.Tensor: Subsampled mask (#batch, 1, time'),\r\n where time' = time // 4.\r\n\r\n \"\"\"\r\n x = x.unsqueeze(1) # (b, c, t, f)\r\n x = self.conv(x)\r\n b, c, t, f = x.size()\r\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\r\n if x_mask is None:\r\n return x, None\r\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get item.\r\n\r\n When reset_parameters() is called, if use_scaled_pos_enc is used,\r\n return the positioning encoding.\r\n\r\n \"\"\"\r\n if key != -1:\r\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\r\n return self.out[key]\r" }, { "identifier": "AttentionPool1d", "path": "src/clap_module/feature_fusion.py", "snippet": "class AttentionPool1d(nn.Module):\r\n def __init__(\r\n self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None\r\n ):\r\n super().__init__()\r\n self.positional_embedding = nn.Parameter(\r\n torch.randn(spacial_dim + 1, embed_dim) / embed_dim\r\n # torch.randn(spacial_dim, embed_dim) / embed_dim\r\n )\r\n self.k_proj = nn.Linear(embed_dim, embed_dim)\r\n self.q_proj = nn.Linear(embed_dim, embed_dim)\r\n self.v_proj = nn.Linear(embed_dim, embed_dim)\r\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\r\n self.num_heads = num_heads\r\n\r\n def forward(self, x):\r\n # import pdb; pdb.set_trace()\r\n x = x.permute(1, 0, 2) # B*L*D -> L*B*D; NCHW -> (HW)NC\r\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\r\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\r\n x, _ = F.multi_head_attention_forward(\r\n query=x,\r\n key=x,\r\n value=x,\r\n embed_dim_to_check=x.shape[-1],\r\n num_heads=self.num_heads,\r\n 
q_proj_weight=self.q_proj.weight,\r\n k_proj_weight=self.k_proj.weight,\r\n v_proj_weight=self.v_proj.weight,\r\n in_proj_weight=None,\r\n in_proj_bias=torch.cat(\r\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]\r\n ),\r\n bias_k=None,\r\n bias_v=None,\r\n add_zero_attn=False,\r\n dropout_p=0,\r\n out_proj_weight=self.c_proj.weight,\r\n out_proj_bias=self.c_proj.bias,\r\n use_separate_proj_weight=True,\r\n training=self.training,\r\n need_weights=False,\r\n )\r\n\r\n return x[0] # B*D\r" }, { "identifier": "DAF", "path": "src/clap_module/feature_fusion.py", "snippet": "class DAF(nn.Module):\r\n \"\"\"直接相加 DirectAddFuse\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(DAF, self).__init__()\r\n\r\n def forward(self, x, residual):\r\n return x + residual\r" }, { "identifier": "AFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class AFF(nn.Module):\r\n \"\"\"多特征融合 AFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(AFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported.'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xo = 2 * x * wei + 2 * residual * (1 - wei)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" }, { "identifier": "iAFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class iAFF(nn.Module):\r\n \"\"\"多特征融合 iAFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(iAFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xi = x * wei + residual * (1 - wei)\r\n\r\n xl2 = self.local_att2(xi)\r\n xg2 = self.global_att(xi)\r\n xlg2 = xl2 + xg2\r\n wei2 = self.sigmoid(xlg2)\r\n xo = x * wei2 + residual * (1 - wei2)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" } ]
import logging
import torch
import math

from .convolution import ConvolutionModule
from .encoder_layer import EncoderLayer
from .modules import get_activation
from .modules import VGG2L
from .modules import (
    LegacyRelPositionMultiHeadedAttention,
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,
)
from .embedding import (
    LegacyRelPositionalEncoding,
    PositionalEncoding,
    RelPositionalEncoding,
    ScaledPositionalEncoding,
)
from .modules import LayerNorm
from .multi_layer_conv import (
    Conv1dLinear,
    MultiLayeredConv1d,
)
from .modules import (
    PositionwiseFeedForward,
)
from .modules import repeat
from .sub_sampling import Conv2dSubsampling
from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14,369
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#                Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Encoder definition."""


class Encoder(torch.nn.Module):
    """Conformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernerl size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type
            signature.)

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        ffn_layer_type="linear",
        ffn_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="relu",
        use_cnn_module=True,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
        max_seq_len=100,
        enable_fusion=False,
        fusion_type="",
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        self.max_seq_len = max_seq_len
        activation = get_activation(activation_type)
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "vgg2l":
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#                Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Encoder definition."""


class Encoder(torch.nn.Module):
    """Conformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernerl size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type
            signature.)

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        ffn_layer_type="linear",
        ffn_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="relu",
        use_cnn_module=True,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
        max_seq_len=100,
        enable_fusion=False,
        fusion_type="",
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        self.max_seq_len = max_seq_len
        activation = get_activation(activation_type)
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "vgg2l":
self.embed = VGG2L(idim, attention_dim)
3
2023-11-25 02:38:32+00:00
16k
facebookresearch/ExPLORe
train_finetuning_pixels.py
[ { "identifier": "DrQLearner", "path": "rlpd/agents/drq/drq_learner.py", "snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n bc_coeff: float = 0,\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=True,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = FrozenDict(actor_def.init(actor_key, observations)[\"params\"])\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_cls = partial(Ensemble, net_cls=critic_cls, num=num_qs)\n critic_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=critic_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n critic_params = FrozenDict(\n critic_def.init(critic_key, observations, actions)[\"params\"]\n )\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n target_critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = FrozenDict(temp_def.init(temp_key)[\"params\"])\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = 
batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n data_augmentation_fn=data_augmentation_fn,\n bc_coeff=bc_coeff,\n )\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n new_agent = self\n\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n actor = _share_encoder(source=new_agent.critic, target=new_agent.actor)\n new_agent = new_agent.replace(actor=actor)\n\n rng, key = jax.random.split(new_agent.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n\n new_agent = new_agent.replace(rng=rng)\n\n return SACLearner.update(new_agent, batch, utd_ratio)" }, { "identifier": "PixelBCAgent", "path": "rlpd/agents/drq/bc.py", "snippet": "class PixelBCAgent(BCAgent):\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n encoder: str = \"d4pg\",\n ):\n assert encoder == \"d4pg\"\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key = jax.random.split(rng, 2)\n\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=False,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = FrozenDict(actor_def.init(actor_key, observations)[\"params\"])\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n return cls(\n rng=rng,\n actor=actor,\n )" }, { "identifier": "PixelRM", "path": "rlpd/agents/drq/rm.py", "snippet": "class PixelRM(struct.PyTreeNode):\n rng: PRNGKey\n r_net: TrainState\n m_net: TrainState\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] 
= (),\n ):\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key = jax.random.split(rng)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n net_cls = partial(StateValue, base_cls=base_cls)\n ucb_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=net_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n r_params = FrozenDict(ucb_def.init(key, observations)[\"params\"])\n r_net = TrainState.create(\n apply_fn=ucb_def.apply,\n params=r_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n m_params = FrozenDict(ucb_def.init(key, observations)[\"params\"])\n m_net = TrainState.create(\n apply_fn=ucb_def.apply,\n params=m_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n r_net=r_net,\n m_net=m_net,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n def _update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n def r_loss_fn(r_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n rs = self.r_net.apply_fn({\"params\": r_params}, batch[\"observations\"])\n\n loss = ((rs - batch[\"rewards\"]) ** 2.0).mean()\n return loss, {\"r_loss\": loss}\n\n grads, r_info = jax.grad(r_loss_fn, has_aux=True)(self.r_net.params)\n r_net = self.r_net.apply_gradients(grads=grads)\n\n def m_loss_fn(m_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n ms = self.m_net.apply_fn({\"params\": m_params}, batch[\"observations\"])\n\n loss = optax.sigmoid_binary_cross_entropy(ms, batch[\"masks\"]).mean()\n return loss, {\"m_loss\": loss}\n\n grads, m_info = jax.grad(m_loss_fn, has_aux=True)(self.m_net.params)\n m_net = self.m_net.apply_gradients(grads=grads)\n\n return self.replace(r_net=r_net, m_net=m_net), {**r_info, **m_info}\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_self = self.replace(rng=rng)\n\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_self, info = new_self._update(mini_batch)\n\n return new_self, info\n\n @jax.jit\n def get_reward(self, batch):\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n rewards = self.r_net.apply_fn(\n {\"params\": self.r_net.params}, batch[\"observations\"]\n )\n return rewards\n\n @jax.jit\n def get_mask(self, batch):\n if \"pixels\" not in 
batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n logits = self.m_net.apply_fn(\n {\"params\": self.m_net.params}, batch[\"observations\"]\n )\n return jax.nn.sigmoid(logits)" }, { "identifier": "PixelRND", "path": "rlpd/agents/drq/rnd.py", "snippet": "class PixelRND(struct.PyTreeNode):\n rng: PRNGKey\n net: TrainState\n frozen_net: TrainState\n coeff: float = struct.field(pytree_node=False)\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n coeff: float = 1.0,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n feature_dim: int = 256,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n ):\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key1, key2 = jax.random.split(rng, 3)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n rnd_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n rnd_cls = partial(StateFeature, base_cls=rnd_base_cls, feature_dim=feature_dim)\n net_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=rnd_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n params = FrozenDict(net_def.init(key1, observations)[\"params\"])\n net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n frozen_params = FrozenDict(net_def.init(key2, observations)[\"params\"])\n frozen_net = TrainState.create(\n apply_fn=net_def.apply,\n params=frozen_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n net=net,\n frozen_net=frozen_net,\n coeff=coeff,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n @jax.jit\n def update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_self = self.replace(rng=rng)\n\n def loss_fn(params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n feats = new_self.net.apply_fn({\"params\": params}, batch[\"observations\"])\n frozen_feats = new_self.frozen_net.apply_fn(\n {\"params\": new_self.frozen_net.params}, batch[\"observations\"]\n )\n\n loss = ((feats - frozen_feats) ** 2.0).mean()\n return loss, {\"rnd_loss\": loss}\n\n grads, info = jax.grad(loss_fn, has_aux=True)(new_self.net.params)\n net = new_self.net.apply_gradients(grads=grads)\n\n return new_self.replace(net=net), info\n\n @jax.jit\n 
def get_reward(self, batch):\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n feats = self.net.apply_fn({\"params\": self.net.params}, batch[\"observations\"])\n frozen_feats = self.net.apply_fn(\n {\"params\": self.frozen_net.params}, batch[\"observations\"]\n )\n return jnp.mean((feats - frozen_feats) ** 2.0, axis=-1) * self.coeff" }, { "identifier": "MemoryEfficientReplayBuffer", "path": "rlpd/data/memory_efficient_replay_buffer.py", "snippet": "class MemoryEfficientReplayBuffer(ReplayBuffer):\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n capacity: int,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n ):\n self.pixel_keys = pixel_keys\n\n observation_space = copy.deepcopy(observation_space)\n self._num_stack = None\n for pixel_key in self.pixel_keys:\n pixel_obs_space = observation_space.spaces[pixel_key]\n if self._num_stack is None:\n self._num_stack = pixel_obs_space.shape[-1]\n else:\n assert self._num_stack == pixel_obs_space.shape[-1]\n self._unstacked_dim_size = pixel_obs_space.shape[-2]\n low = pixel_obs_space.low[..., 0]\n high = pixel_obs_space.high[..., 0]\n unstacked_pixel_obs_space = Box(\n low=low, high=high, dtype=pixel_obs_space.dtype\n )\n observation_space.spaces[pixel_key] = unstacked_pixel_obs_space\n\n next_observation_space_dict = copy.deepcopy(observation_space.spaces)\n for pixel_key in self.pixel_keys:\n next_observation_space_dict.pop(pixel_key)\n next_observation_space = gym.spaces.Dict(next_observation_space_dict)\n\n self._first = True\n self._is_correct_index = np.full(capacity, False, dtype=bool)\n\n super().__init__(\n observation_space,\n action_space,\n capacity,\n next_observation_space=next_observation_space,\n )\n\n def insert(self, data_dict: DatasetDict):\n if self._insert_index == 0 and self._capacity == len(self) and not self._first:\n indxs = np.arange(len(self) - self._num_stack, len(self))\n for indx in indxs:\n element = super().sample(1, indx=indx)\n self._is_correct_index[self._insert_index] = False\n super().insert(element)\n\n data_dict = data_dict.copy()\n data_dict[\"observations\"] = data_dict[\"observations\"].copy()\n data_dict[\"next_observations\"] = data_dict[\"next_observations\"].copy()\n\n obs_pixels = {}\n next_obs_pixels = {}\n for pixel_key in self.pixel_keys:\n obs_pixels[pixel_key] = data_dict[\"observations\"].pop(pixel_key)\n next_obs_pixels[pixel_key] = data_dict[\"next_observations\"].pop(pixel_key)\n\n if self._first:\n for i in range(self._num_stack):\n for pixel_key in self.pixel_keys:\n data_dict[\"observations\"][pixel_key] = obs_pixels[pixel_key][..., i]\n\n self._is_correct_index[self._insert_index] = False\n super().insert(data_dict)\n\n for pixel_key in self.pixel_keys:\n data_dict[\"observations\"][pixel_key] = next_obs_pixels[pixel_key][..., -1]\n\n self._first = data_dict[\"dones\"]\n\n self._is_correct_index[self._insert_index] = True\n super().insert(data_dict)\n\n for i in range(self._num_stack):\n indx = (self._insert_index + i) % len(self)\n self._is_correct_index[indx] = False\n\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n pack_obs_and_next_obs: bool = False,\n ) -> frozen_dict.FrozenDict:\n \"\"\"Samples from the replay buffer.\n\n Args:\n batch_size: Minibatch size.\n keys: Keys to sample.\n indx: Take indices instead of sampling.\n pack_obs_and_next_obs: whether to pack img and next_img into one image.\n It's useful when they have overlapping frames.\n\n 
Returns:\n A frozen dictionary.\n \"\"\"\n\n if indx is None:\n if hasattr(self.np_random, \"integers\"):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n for i in range(batch_size):\n while not self._is_correct_index[indx[i]]:\n if hasattr(self.np_random, \"integers\"):\n indx[i] = self.np_random.integers(len(self))\n else:\n indx[i] = self.np_random.randint(len(self))\n else:\n pass\n\n if keys is None:\n keys = self.dataset_dict.keys()\n else:\n assert \"observations\" in keys\n\n keys = list(keys)\n keys.remove(\"observations\")\n\n batch = super().sample(batch_size, keys, indx)\n batch = batch.unfreeze()\n\n obs_keys = self.dataset_dict[\"observations\"].keys()\n obs_keys = list(obs_keys)\n for pixel_key in self.pixel_keys:\n obs_keys.remove(pixel_key)\n\n batch[\"observations\"] = {}\n for k in obs_keys:\n batch[\"observations\"][k] = _sample(\n self.dataset_dict[\"observations\"][k], indx\n )\n\n for pixel_key in self.pixel_keys:\n obs_pixels = self.dataset_dict[\"observations\"][pixel_key]\n obs_pixels = np.lib.stride_tricks.sliding_window_view(\n obs_pixels, self._num_stack + 1, axis=0\n )\n obs_pixels = obs_pixels[indx - self._num_stack]\n\n if pack_obs_and_next_obs:\n batch[\"observations\"][pixel_key] = obs_pixels\n else:\n batch[\"observations\"][pixel_key] = obs_pixels[..., :-1]\n if \"next_observations\" in keys:\n batch[\"next_observations\"][pixel_key] = obs_pixels[..., 1:]\n\n return frozen_dict.freeze(batch)" }, { "identifier": "ReplayBuffer", "path": "rlpd/data/replay_buffer.py", "snippet": "class ReplayBuffer(Dataset):\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n capacity: int,\n next_observation_space: Optional[gym.Space] = None,\n ):\n if next_observation_space is None:\n next_observation_space = observation_space\n\n observation_data = _init_replay_dict(observation_space, capacity)\n next_observation_data = _init_replay_dict(next_observation_space, capacity)\n dataset_dict = dict(\n observations=observation_data,\n next_observations=next_observation_data,\n actions=np.empty((capacity, *action_space.shape), dtype=action_space.dtype),\n rewards=np.empty((capacity,), dtype=np.float32),\n masks=np.empty((capacity,), dtype=np.float32),\n dones=np.empty((capacity,), dtype=np.float32),\n )\n\n super().__init__(dataset_dict)\n\n self._size = 0\n self._capacity = capacity\n self._insert_index = 0\n\n def __len__(self) -> int:\n return self._size\n\n def insert(self, data_dict: DatasetDict):\n _insert_recursively(self.dataset_dict, data_dict, self._insert_index)\n\n self._insert_index = (self._insert_index + 1) % self._capacity\n self._size = min(self._size + 1, self._capacity)\n\n def insert_batch(self, data_dict: DatasetDict):\n first_key = list(data_dict.keys())[0]\n batch_size = data_dict[first_key].shape[0]\n\n if self._insert_index + batch_size > self._capacity:\n self._insert_index = 0\n self._size = max(self._size, self._insert_index + batch_size)\n _insert_recursively_batch(\n self.dataset_dict, data_dict, self._insert_index, batch_size\n )\n\n def get_iterator(self, queue_size: int = 2, sample_args: dict = {}):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n for _ in range(n):\n data = self.sample(**sample_args)\n queue.append(jax.device_put(data))\n\n enqueue(queue_size)\n while queue:\n yield 
queue.popleft()\n enqueue(1)" }, { "identifier": "evaluate", "path": "rlpd/evaluation.py", "snippet": "def evaluate(agent, env: gym.Env, num_episodes: int) -> Dict[str, float]:\n\n trajs = []\n cum_returns = []\n cum_lengths = []\n for i in range(num_episodes):\n observation, done = env.reset(), False\n traj = [observation]\n cum_return = 0\n cum_length = 0\n while not done:\n action = agent.eval_actions(observation)\n observation, reward, done, _ = env.step(action)\n cum_return += reward\n cum_length += 1\n traj.append(observation)\n cum_returns.append(cum_return)\n cum_lengths.append(cum_length)\n trajs.append({\"observation\": np.stack(traj, axis=0)})\n return {\"return\": np.mean(cum_returns), \"length\": np.mean(cum_lengths)}, trajs" }, { "identifier": "wrap_pixels", "path": "rlpd/wrappers/pixels.py", "snippet": "def wrap_pixels(\n env: gym.Env,\n action_repeat: int,\n image_size: int = 84,\n num_stack: Optional[int] = 3,\n camera_id: int = 0,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n) -> gym.Env:\n if action_repeat > 1:\n env = RepeatAction(env, action_repeat)\n\n env = UniversalSeed(env)\n env = gym.wrappers.RescaleAction(env, -1, 1)\n\n env = PixelObservationWrapper(\n env,\n pixels_only=True,\n render_kwargs={\n \"pixels\": {\n \"height\": image_size,\n \"width\": image_size,\n \"camera_id\": camera_id,\n }\n },\n pixel_keys=pixel_keys,\n )\n\n if num_stack is not None:\n env = FrameStack(env, num_stack=num_stack)\n\n env = gym.wrappers.ClipAction(env)\n\n return env, pixel_keys" }, { "identifier": "PixelICVF", "path": "rlpd/agents/drq/icvf.py", "snippet": "class PixelICVF(struct.PyTreeNode):\n rng: PRNGKey\n net: TrainState\n target_net: TrainState\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n feature_dim: int = 256,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] 
= (),\n **kwargs,\n ):\n print(\"Got additional kwargs: \", kwargs)\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key1, key2 = jax.random.split(rng, 3)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n rnd_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n rnd_cls = partial(ICVF, base_cls=rnd_base_cls, feature_dim=feature_dim)\n net_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=rnd_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n params = FrozenDict(net_def.init(key1, observations)[\"params\"])\n net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n target_net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n net=net,\n target_net=target_net,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n def _update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n def loss_fn(params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n def get_v(params, s, g, z):\n phi = self.net.apply_fn({\"params\": params}, s)[\"phi\"]\n psi = self.net.apply_fn({\"params\": params}, g)[\"psi\"]\n T = self.net.apply_fn({\"params\": params}, z)[\"T\"]\n phi_T = apply_layernorm(phi * T)\n psi_T = apply_layernorm(psi * T)\n return -1 * optax.safe_norm(phi_T - psi_T, 1e-3, axis=-1)\n\n V = get_v(\n params, batch[\"observations\"], batch[\"goals\"], batch[\"desired_goals\"]\n )\n nV = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"goals\"],\n batch[\"desired_goals\"],\n )\n target_V = batch[\"rewards\"] + 0.99 * batch[\"masks\"] * nV\n\n V_z = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"desired_goals\"],\n batch[\"desired_goals\"],\n )\n nV_z = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"desired_goals\"],\n batch[\"desired_goals\"],\n )\n adv = batch[\"desired_rewards\"] + 0.99 * batch[\"desired_masks\"] * nV_z - V_z\n\n def expectile_fn(adv, loss, expectile):\n weight = jnp.where(adv >= 0, expectile, 1 - expectile)\n return weight * loss\n\n def masked_mean(x, mask):\n mask = (mask > 0).astype(jnp.float32)\n return jnp.sum(x * mask) / (1e-5 + jnp.sum(mask))\n\n loss = expectile_fn(adv, jnp.square(V - target_V), 0.9).mean()\n return loss, {\n \"icvf_loss\": loss,\n \"V_success\": masked_mean(V, 1.0 - batch[\"masks\"]),\n \"V_failure\": masked_mean(V, batch[\"masks\"]),\n }\n\n grads, info = jax.grad(loss_fn, has_aux=True)(self.net.params)\n net = self.net.apply_gradients(grads=grads)\n target_params = optax.incremental_update(\n self.net.params, self.target_net.params, 0.005\n )\n target_net = self.target_net.replace(params=target_params)\n return self.replace(net=net, target_net=target_net), info\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n # if \"pixels\" not in 
batch[\"next_observations\"]:\n # batch = _unpack(batch)\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n goals = self.data_augmentation_fn(key, batch[\"goals\"])\n desired_goals = self.data_augmentation_fn(key, batch[\"desired_goals\"])\n\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n \"goals\": goals,\n \"desired_goals\": desired_goals,\n }\n )\n new_self = self.replace(rng=rng)\n\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_self, info = new_self._update(mini_batch)\n\n return new_self, info" }, { "identifier": "gc_dataset", "path": "rlpd/gc_dataset.py", "snippet": "class GCDataset:\nclass GCSDataset(GCDataset):\n def get_default_config():\n def __post_init__(self):\n def sample_goals(self, indx, p_randomgoal=None, p_trajgoal=None, p_currgoal=None):\n def sample(self, batch_size: int, indx=None):\n def get_default_config():\n def sample(self, batch_size: int, indx=None):" }, { "identifier": "Dataset", "path": "rlpd/data/dataset.py", "snippet": "class Dataset(object):\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n self.dataset_dict = dataset_dict\n self.dataset_len = _check_lengths(dataset_dict)\n\n # Seeding similar to OpenAI Gym:\n # https://github.com/openai/gym/blob/master/gym/spaces/space.py#L46\n self._np_random = None\n self._seed = None\n if seed is not None:\n self.seed(seed)\n\n @property\n def np_random(self) -> np.random.RandomState:\n if self._np_random is None:\n self.seed()\n return self._np_random\n\n def seed(self, seed: Optional[int] = None) -> list:\n self._np_random, self._seed = seeding.np_random(seed)\n return [self._seed]\n\n def __len__(self) -> int:\n return self.dataset_len\n\n def get_iter(self, batch_size):\n for i in range(len(self) // batch_size):\n indx = np.arange(i * batch_size, (i + 1) * batch_size)\n indx = np.clip(indx, a_min=0, a_max=len(self) - 1)\n batch = dict()\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n yield frozen_dict.freeze(batch)\n\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n if indx is None:\n if hasattr(self.np_random, \"integers\"):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n batch = dict()\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n return frozen_dict.freeze(batch)\n\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n if not hasattr(self, \"rng\"):\n self.rng = jax.random.PRNGKey(self._seed or 42)\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n jax_dataset_dict = {k: self.dataset_dict[k] for k in keys}\n jax_dataset_dict = jax.device_put(jax_dataset_dict)\n\n @jax.jit\n def _sample_jax(rng):\n key, rng = 
jax.random.split(rng)\n indx = jax.random.randint(\n key, (batch_size,), minval=0, maxval=len(self)\n )\n return rng, jax.tree_map(\n lambda d: jnp.take(d, indx, axis=0), jax_dataset_dict\n )\n\n self._sample_jax = _sample_jax\n\n self.rng, sample = self._sample_jax(self.rng)\n return sample\n\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n assert 0 < ratio and ratio < 1\n train_index = np.index_exp[: int(self.dataset_len * ratio)]\n test_index = np.index_exp[int(self.dataset_len * ratio) :]\n\n index = np.arange(len(self), dtype=np.int32)\n self.np_random.shuffle(index)\n train_index = index[: int(self.dataset_len * ratio)]\n test_index = index[int(self.dataset_len * ratio) :]\n\n train_dataset_dict = _subselect(self.dataset_dict, train_index)\n test_dataset_dict = _subselect(self.dataset_dict, test_index)\n return Dataset(train_dataset_dict), Dataset(test_dataset_dict)\n\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n episode_starts = [0]\n episode_ends = []\n\n episode_return = 0\n episode_returns = []\n\n for i in range(len(self)):\n episode_return += self.dataset_dict[\"rewards\"][i]\n\n if self.dataset_dict[\"dones\"][i]:\n episode_returns.append(episode_return)\n episode_ends.append(i + 1)\n if i + 1 < len(self):\n episode_starts.append(i + 1)\n episode_return = 0.0\n\n return episode_starts, episode_ends, episode_returns\n\n def filter_by_fn(self, fn):\n bool_indx = np.full((len(self),), False, dtype=bool)\n for i in range(len(self)):\n tran = {k: v[i] for k, v in self.dataset_dict.items()}\n bool_indx[i] = fn(tran)\n\n self.dataset_dict = _subselect(self.dataset_dict, bool_indx)\n self.dataset_len = _check_lengths(self.dataset_dict)\n\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n assert (take_top is None and threshold is not None) or (\n take_top is not None and threshold is None\n )\n\n (\n episode_starts,\n episode_ends,\n episode_returns,\n ) = self._trajectory_boundaries_and_returns()\n\n if take_top is not None:\n threshold = np.percentile(episode_returns, 100 - take_top)\n\n bool_indx = np.full((len(self),), False, dtype=bool)\n\n for i in range(len(episode_returns)):\n if episode_returns[i] >= threshold:\n bool_indx[episode_starts[i] : episode_ends[i]] = True\n\n self.dataset_dict = _subselect(self.dataset_dict, bool_indx)\n\n self.dataset_len = _check_lengths(self.dataset_dict)\n\n def normalize_returns(self, scaling: float = 1000):\n (_, _, episode_returns) = self._trajectory_boundaries_and_returns()\n self.dataset_dict[\"rewards\"] /= np.max(episode_returns) - np.min(\n episode_returns\n )\n self.dataset_dict[\"rewards\"] *= scaling" }, { "identifier": "COGDataset", "path": "rlpd/data/cog_datasets.py", "snippet": "class COGDataset(MemoryEfficientReplayBuffer):\n def __init__(\n self,\n env: gym.Env,\n dataset_path: str,\n capacity: int = 500_000,\n subsample_ratio: float = 1.0,\n pixel_keys: tuple = (\"pixels\",),\n np_rng = None,\n load_successes: bool = True,\n ):\n self.np_rng = np_rng\n super().__init__(\n env.observation_space,\n env.action_space,\n capacity=capacity,\n pixel_keys=pixel_keys\n )\n self.successful_offline_prior_trajs = []\n self.successful_offline_task_trajs = []\n \n self._load_data_from_dir(dataset_path, subsample_ratio)\n \n self.load_successes = load_successes\n if self.load_successes:\n self._load_successful_traj(dataset_path)\n\n def load_successful_traj(self):\n assert self.load_successes, \"did not load successful trajectories upon making 
this dataset\"\n prior_idx = self.np_rng.integers(len(self.successful_offline_prior_trajs))\n task_idx = self.np_rng.integers(len(self.successful_offline_task_trajs))\n prior_traj = self.successful_offline_prior_trajs[prior_idx]\n task_traj = self.successful_offline_task_trajs[task_idx]\n return prior_traj + task_traj\n \n def _load_data_from_dir(self, dataset_path, subsample_ratio=1.0):\n print(\"subsample ratio:\", subsample_ratio * subsample_ratio) # sub-sampled twice\n for f in os.listdir(dataset_path):\n full_path = os.path.join(dataset_path, f)\n if f.endswith('.npy'):\n print(\"*\"*20, \"\\nloading data from:\", full_path)\n data = np.load(full_path, allow_pickle=True)\n print(\"prior subsampling # trajs:\", len(data))\n data = self._subsample_data(data, subsample_ratio)\n self._load_data(data, subsample_ratio)\n print(\"post subsampling # trajs:\", len(self))\n \n def _subsample_data(self, data, r=1.0):\n assert 0 <= r <= 1\n n = len(data)\n idxs = self.np_rng.choice(n, size=int(n*r), replace=False)\n return data[idxs]\n\n def _load_data(self, data, subsample_ratio=1.0):\n cutoff = int(len(data) * subsample_ratio)\n for i, traj in enumerate(data):\n if i > cutoff:\n break\n trans = dict_to_list(traj)\n for tran in trans:\n data_dict = self._make_data_dict(tran)\n self.insert(data_dict)\n \n def _load_successful_traj(self, dataset_path):\n # load successful offline trajectories for visualizations / evaluation\n prior_data = np.load(os.path.join(dataset_path, 'successful', 'prior_success.npy'), allow_pickle=True)\n task_data = np.load(os.path.join(dataset_path, 'successful', 'task_success.npy'), allow_pickle=True)\n\n for traj in prior_data:\n trans = dict_to_list(traj)\n trans = [self._make_data_dict(tran) for tran in trans]\n self.successful_offline_prior_trajs.append(trans)\n\n for traj in task_data:\n trans = dict_to_list(traj)\n trans = [self._make_data_dict(tran) for tran in trans]\n self.successful_offline_task_trajs.append(trans)\n\n def _make_data_dict(self, tran):\n return dict(\n observations={\"pixels\": np.array(tran[\"observations\"][\"image\"])[..., None]},\n actions=np.array(tran[\"actions\"]),\n next_observations={\"pixels\": np.array(tran[\"next_observations\"][\"image\"])[..., None]},\n rewards=np.array(tran[\"rewards\"]),\n masks=1-np.array(tran[\"terminals\"], dtype=float),\n dones=np.array(tran[\"agent_infos\"][\"done\"])\n )" } ]
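The `PixelRND.get_reward` snippet in the context list above boils down to the standard RND recipe: the intrinsic reward is the squared error between a trained predictor network and a frozen, randomly initialised copy of it. A minimal standalone sketch of that computation, assuming the encoder features have already been produced (function and argument names here are illustrative, not part of the repository's API):

import jax.numpy as jnp

def rnd_bonus(pred_feats, frozen_feats, coeff=1.0):
    # Intrinsic reward per sample: squared error between the trained
    # predictor's features and the frozen random target's features,
    # averaged over the feature dimension and scaled by coeff.
    return coeff * jnp.mean((pred_feats - frozen_feats) ** 2.0, axis=-1)

def rnd_loss(pred_feats, frozen_feats):
    # Predictor training objective: the same error averaged over the
    # whole batch; the frozen target network is never updated.
    return jnp.mean((pred_feats - frozen_feats) ** 2.0)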
import os import numpy as np import tqdm import wandb import matplotlib.pyplot as plt import pickle import roboverse import types import jax import jax.numpy as jnp from absl import app, flags from flax.core import FrozenDict from ml_collections import config_flags from flax.core import frozen_dict from flax.training import checkpoints from rlpd.agents import DrQLearner, PixelRND, PixelRM, PixelBCAgent from rlpd.data import MemoryEfficientReplayBuffer, ReplayBuffer from rlpd.evaluation import evaluate from rlpd.wrappers import wrap_pixels from rlpd.agents.drq.icvf import PixelICVF from rlpd import gc_dataset from gym.wrappers import TimeLimit, FilterObservation, RecordEpisodeStatistics from rlpd.data import Dataset from rlpd.data.cog_datasets import COGDataset from functools import partial
13,776
for k, v in info["episode"].items(): decode = {"r": "return", "l": "length", "t": "time"} wandb.log({f"episode/{decode[k]}": v}, step=record_step) if FLAGS.bc_pretrain_rollin > 0.0: curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < FLAGS.bc_pretrain_rollin else False ) # main updates if i >= FLAGS.start_training: online_batch = next(replay_buffer_iterator) if i >= FLAGS.start_training * 2: # update the reward model on the online batch if rm is not None: rm, rm_update_info = rm.update(online_batch, FLAGS.utd_ratio) logging_info.update(add_prefix("rm/", rm_update_info)) if rnd is not None: rnd, rnd_update_info = rnd.update( frozen_dict.freeze( { "observations": { k: ob[None] for k, ob in observation.items() }, "actions": action[None], "next_observations": { k: ob[None] for k, ob in next_observation.items() }, "rewards": np.array(reward)[None], "masks": np.array(mask)[None], "dones": np.array(done)[None], } ) ) logging_info.update(add_prefix("rnd/", rnd_update_info)) # prepare the batch for the main agent online_replace = {"bc_masks": jnp.ones_like(online_batch["masks"])} if FLAGS.use_rnd_online: online_replace["rewards"] = online_batch["rewards"] + rnd.get_reward( frozen_dict.freeze(online_batch) ) online_batch = online_batch.copy(add_or_replace=online_replace) if FLAGS.offline_ratio > 0: offline_batch = next(ds_iterator) offline_replace = { "bc_masks": jnp.ones_like(offline_batch["masks"]), "rewards": offline_batch["rewards"], } if FLAGS.offline_relabel_type in ["pred", "min"]: offline_replace["masks"] = rm.get_mask(offline_batch) if FLAGS.offline_relabel_type == "min": offline_replace["rewards"] = ( offline_batch["rewards"].at[:].set(ds_minr) ) if FLAGS.offline_relabel_type == "pred": offline_replace["rewards"] = rm.get_reward(offline_batch) if FLAGS.use_rnd_offline: offline_replace["rewards"] = offline_replace[ "rewards" ] + rnd.get_reward(frozen_dict.freeze(offline_batch)) offline_batch = offline_batch.copy(add_or_replace=offline_replace) batch = combine(offline_batch, online_batch) else: batch = online_batch # update the main agent agent, update_info = agent.update(batch, FLAGS.utd_ratio) logging_info.update(add_prefix("agent/", update_info)) if i % FLAGS.log_interval == 0: wandb.log({"env_step": i}, step=record_step) for k, v in logging_info.items(): wandb.log({k: v}, step=record_step) # visualize rewards rm and rnd rewards along a successful offline trajectory traj = ds.load_successful_traj() rnd_reward = [] rm_reward = [] for tran in traj: if rnd is not None: rnd_reward.append(rnd.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: rm_reward.append(rm.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: plt.clf() plt.plot(rm_reward, label="rm") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rm": plt}, step=record_step ) if rnd is not None: plt.clf() plt.plot(rnd_reward, label="rnd") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rnd": plt}, step=record_step ) if i % FLAGS.eval_interval == 0:
""" Modified from https://github.com/ikostrikov/rlpd/blob/main/rlpd/train_finetuning_pixels.py Original lincense information: MIT License Copyright (c) 2022 Ilya Kostrikov, Philip J. Ball, Laura Smith Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #! /usr/bin/env python ### cog imports ### ### cog imports ### FLAGS = flags.FLAGS flags.DEFINE_string("project_name", "explore-cog", "wandb project name.") flags.DEFINE_string("env_name", "cheetah-run-v0", "Environment name.") flags.DEFINE_float( "dataset_subsample_ratio", 0.1, "Ratio of the dataset to subsample (done twice)" ) flags.DEFINE_bool("use_icvf", False, "Whether to use the icvf encoder") flags.DEFINE_float("offline_ratio", 0.5, "Offline ratio.") flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_integer("eval_episodes", 100, "Number of episodes used for evaluation.") flags.DEFINE_integer("log_interval", 1000, "Logging interval.") flags.DEFINE_integer("eval_interval", 5000, "Eval interval.") flags.DEFINE_integer("batch_size", 256, "Mini batch size.") flags.DEFINE_integer("max_steps", 500000, "Number of training steps.") flags.DEFINE_integer( "start_training", 5000, "Number of training steps to start training." ) flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.") flags.DEFINE_string("save_dir", "exp_data_cog", "Directory to save checkpoints.") flags.DEFINE_bool("checkpoint_model", False, "save model") flags.DEFINE_bool("checkpoint_buffer", False, "save replay buffer") flags.DEFINE_integer("utd_ratio", 1, "Update to data ratio.") flags.DEFINE_float("bc_pretrain_rollin", 0.0, "rollin coeff") flags.DEFINE_integer( "bc_pretrain_steps", 10000, "Pre-train BC policy for a number of steps on pure offline data", ) config_flags.DEFINE_config_file( "config", "configs/rlpd_pixels_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. 
[gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed) ds = COGDataset( env=env, dataset_path=dataset_path, capacity=300000, subsample_ratio=FLAGS.dataset_subsample_ratio, np_rng=np_rng, ) ds.seed(FLAGS.seed) ds_minr = ds.dataset_dict["rewards"][: len(ds)].min() assert -10 < ds_minr < 10, "maybe sampling reward outside of buffer range" ds_iterator = ds.get_iterator( sample_args={ "batch_size": int(FLAGS.batch_size * FLAGS.utd_ratio * FLAGS.offline_ratio), "pack_obs_and_next_obs": True, } ) replay_buffer = MemoryEfficientReplayBuffer( env.observation_space, env.action_space, FLAGS.max_steps ) replay_buffer_iterator = replay_buffer.get_iterator( sample_args={ "batch_size": int( FLAGS.batch_size * FLAGS.utd_ratio * (1 - FLAGS.offline_ratio) ), "pack_obs_and_next_obs": True, } ) replay_buffer.seed(FLAGS.seed) ########### MODELS ########### # Crashes on some setups if agent is created before replay buffer. 
kwargs = dict(FLAGS.config) model_cls = kwargs.pop("model_cls") agent = globals()[model_cls].create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) if FLAGS.offline_relabel_type != "gt": kwargs = dict(FLAGS.rm_config) model_cls = kwargs.pop("model_cls") rm = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rm = None if FLAGS.use_rnd_offline or FLAGS.use_rnd_online: kwargs = dict(FLAGS.rnd_config) model_cls = kwargs.pop("model_cls") rnd = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rnd = None # Pre-training record_step = 0 # ICVF training and initialize RM and RND with ICVF encoder if FLAGS.use_icvf: # assert rm is not None or rnd is not None, "ICVF is not needed in this configuration" icvf = PixelICVF.create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **dict(FLAGS.config), ) gc_ds = gc_dataset.GCSDataset(ds, **gc_dataset.GCSDataset.get_default_config()) for i in tqdm.trange(75001): record_step += 1 batch = gc_ds.sample(FLAGS.batch_size) icvf, update_info = icvf.update(frozen_dict.freeze(batch), 1) if i % FLAGS.log_interval == 0: for k, v in update_info.items(): wandb.log({f"icvf-training/{k}": v}, step=record_step) replace_keys = ["encoder_0"] replace = {k: icvf.net.params[k] for k in replace_keys} if rnd is not None: new_params = FrozenDict(rnd.net.params).copy(add_or_replace=replace) new_frozen_params = FrozenDict(rnd.frozen_net.params).copy( add_or_replace=replace ) rnd = rnd.replace( net=rnd.net.replace(params=new_params), frozen_net=rnd.frozen_net.replace(params=new_frozen_params), ) if rm is not None: new_params = FrozenDict(rm.r_net.params).copy(add_or_replace=replace) rm = rm.replace(r_net=rm.r_net.replace(params=new_params)) if FLAGS.bc_pretrain_rollin > 0.0: kwargs = dict(FLAGS.bc_config) model_cls = kwargs.pop("model_cls") bc_policy = globals()[model_cls].create( FLAGS.seed + 152, env.observation_space, env.action_space, **kwargs ) if FLAGS.use_icvf: new_params = FrozenDict(bc_policy.actor.params).copy(add_or_replace=replace) bc_policy = bc_policy.replace( actor=bc_policy.actor.replace(params=new_params) ) else: bc_policy = None if bc_policy is not None: for i in tqdm.tqdm( range(FLAGS.bc_pretrain_steps), smoothing=0.1, disable=not FLAGS.tqdm ): record_step += 1 batch = ds.sample(int(FLAGS.batch_size * FLAGS.utd_ratio)) bc_policy, update_info = bc_policy.update(batch, FLAGS.utd_ratio) if i % FLAGS.log_interval == 0: for k, v in update_info.items(): wandb.log(add_prefix("bc/", {k: v}), step=record_step) # Training observation, done = env.reset(), False rng = jax.random.PRNGKey(seed=FLAGS.seed) if FLAGS.bc_pretrain_rollin > 0.0: curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < FLAGS.bc_pretrain_rollin else False ) else: rollin_enabled = False for i in tqdm.tqdm( range(1, FLAGS.max_steps + 1), smoothing=0.1, disable=not FLAGS.tqdm, ): record_step += 1 logging_info = {} if rollin_enabled: action, bc_policy = bc_policy.sample_actions(observation) curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < agent.discount else False ) else: if i < FLAGS.start_training: action = env.action_space.sample() else: action, agent = agent.sample_actions(observation) next_observation, reward, done, info = env.step(action) if not done or "TimeLimit.truncated" in info: 
mask = 1.0 else: mask = 0.0 replay_buffer.insert( dict( observations=observation, actions=action, rewards=reward, masks=mask, dones=done, next_observations=next_observation, ) ) observation = next_observation if done: observation, done = env.reset(), False for k, v in info["episode"].items(): decode = {"r": "return", "l": "length", "t": "time"} wandb.log({f"episode/{decode[k]}": v}, step=record_step) if FLAGS.bc_pretrain_rollin > 0.0: curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < FLAGS.bc_pretrain_rollin else False ) # main updates if i >= FLAGS.start_training: online_batch = next(replay_buffer_iterator) if i >= FLAGS.start_training * 2: # update the reward model on the online batch if rm is not None: rm, rm_update_info = rm.update(online_batch, FLAGS.utd_ratio) logging_info.update(add_prefix("rm/", rm_update_info)) if rnd is not None: rnd, rnd_update_info = rnd.update( frozen_dict.freeze( { "observations": { k: ob[None] for k, ob in observation.items() }, "actions": action[None], "next_observations": { k: ob[None] for k, ob in next_observation.items() }, "rewards": np.array(reward)[None], "masks": np.array(mask)[None], "dones": np.array(done)[None], } ) ) logging_info.update(add_prefix("rnd/", rnd_update_info)) # prepare the batch for the main agent online_replace = {"bc_masks": jnp.ones_like(online_batch["masks"])} if FLAGS.use_rnd_online: online_replace["rewards"] = online_batch["rewards"] + rnd.get_reward( frozen_dict.freeze(online_batch) ) online_batch = online_batch.copy(add_or_replace=online_replace) if FLAGS.offline_ratio > 0: offline_batch = next(ds_iterator) offline_replace = { "bc_masks": jnp.ones_like(offline_batch["masks"]), "rewards": offline_batch["rewards"], } if FLAGS.offline_relabel_type in ["pred", "min"]: offline_replace["masks"] = rm.get_mask(offline_batch) if FLAGS.offline_relabel_type == "min": offline_replace["rewards"] = ( offline_batch["rewards"].at[:].set(ds_minr) ) if FLAGS.offline_relabel_type == "pred": offline_replace["rewards"] = rm.get_reward(offline_batch) if FLAGS.use_rnd_offline: offline_replace["rewards"] = offline_replace[ "rewards" ] + rnd.get_reward(frozen_dict.freeze(offline_batch)) offline_batch = offline_batch.copy(add_or_replace=offline_replace) batch = combine(offline_batch, online_batch) else: batch = online_batch # update the main agent agent, update_info = agent.update(batch, FLAGS.utd_ratio) logging_info.update(add_prefix("agent/", update_info)) if i % FLAGS.log_interval == 0: wandb.log({"env_step": i}, step=record_step) for k, v in logging_info.items(): wandb.log({k: v}, step=record_step) # visualize rewards rm and rnd rewards along a successful offline trajectory traj = ds.load_successful_traj() rnd_reward = [] rm_reward = [] for tran in traj: if rnd is not None: rnd_reward.append(rnd.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: rm_reward.append(rm.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: plt.clf() plt.plot(rm_reward, label="rm") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rm": plt}, step=record_step ) if rnd is not None: plt.clf() plt.plot(rnd_reward, label="rnd") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rnd": plt}, step=record_step ) if i % 
FLAGS.eval_interval == 0:
eval_info, _ = evaluate(
6
2023-11-19 21:28:52+00:00
16k
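The training script in the record above draws an offline batch and an online batch of equal size each step and interleaves them with its `combine` helper before updating the agent. A minimal sketch of that interleaving for flat NumPy arrays, assuming equal batch halves (the real helper also recurses through nested observation dictionaries and returns a FrozenDict):

import numpy as np

def interleave(offline, online):
    # Put offline samples at even indices and online samples at odd
    # indices so every slice of the combined batch mixes both sources.
    assert offline.shape == online.shape
    out = np.empty((offline.shape[0] * 2, *offline.shape[1:]), dtype=offline.dtype)
    out[0::2] = offline
    out[1::2] = online
    return out

# e.g. interleaving the reward arrays of two 4-sample batches
print(interleave(np.zeros(4), np.ones(4)))  # [0. 1. 0. 1. 0. 1. 0. 1.]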
Luo-Z13/pointobb
PointOBB/mmdet/models/detectors/PointOBB.py
[ { "identifier": "DETECTORS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "DETECTORS = MODELS" }, { "identifier": "TwoStageDetector", "path": "PointOBB/mmdet/models/detectors/two_stage.py", "snippet": "class TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(TwoStageDetector, self).__init__(init_cfg)\n backbone.pretrained = pretrained\n self.backbone = build_backbone(backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n roi_head.pretrained = pretrained\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @property\n def with_rpn(self):\n \"\"\"bool: whether the detector has RPN\"\"\"\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n \"\"\"bool: whether the detector has a RoI head\"\"\"\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs,)\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs,)\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n ann_weight=None, ## add by fei\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. 
Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n ann_weight=ann_weight,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,ann_weight, ## add by fei\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # modified by hui #####################################\n if self.test_cfg.rcnn.get('do_tile_as_aug', False):\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n else:\n return self.tile_aug_test(imgs, img_metas, rescale)\n ##########################################################################\n\n # add by hui ######################################################################\n def tile_aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations for each tile seperatelly.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n x = self.extract_feats(imgs)\n\n assert len(x) == len(img_metas)\n assert not self.roi_head.with_mask\n tile2img_metas = {}\n tile2feats = {}\n for feat, img_meta in zip(x, img_metas):\n assert len(img_meta) == 1\n tile_off = img_meta[0].pop('tile_offset') # must pop here, attention.\n if tile_off in tile2img_metas:\n tile2img_metas[tile_off].append(img_meta)\n tile2feats[tile_off].append(feat)\n else:\n tile2img_metas[tile_off] = [img_meta]\n tile2feats[tile_off] = [feat]\n\n # forward and merge all result on each tile\n all_tile_bboxes = []\n all_tile_labels = []\n num_classes = 0\n for tile_off, img_metas in tile2img_metas.items():\n x = tile2feats[tile_off]\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n bboxes = self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)[0]\n\n device = x[0][0].device\n dx, dy = tile_off\n labels = []\n num_classes = max(num_classes, len(bboxes))\n for cls in range(len(bboxes)):\n bboxes[cls][:, [0, 2]] += dx\n bboxes[cls][:, [1, 3]] += 
dy\n label = torch.zeros((len(bboxes[cls]),), dtype=torch.long, device=device) + cls\n labels.append(label)\n all_tile_bboxes.extend(bboxes)\n all_tile_labels.extend(labels)\n import numpy as np\n all_tile_bboxes = np.concatenate(all_tile_bboxes, axis=0)\n all_tile_bboxes = torch.from_numpy(all_tile_bboxes).to(device)\n all_tile_labels = torch.cat(all_tile_labels, dim=0)\n\n # performance NMS\n if len(all_tile_bboxes) > 0:\n from mmcv.ops.nms import batched_nms\n dets, keep = batched_nms(all_tile_bboxes[:, :4], all_tile_bboxes[:, 4].contiguous(),\n all_tile_labels, self.test_cfg.rcnn.nms)\n max_num = self.test_cfg.rcnn.max_per_img\n if max_num > 0:\n dets = dets[:max_num]\n keep = keep[:max_num]\n det_bboxes, det_labels = dets, all_tile_labels[keep]\n else:\n det_bboxes, det_labels = torch.zeros((0, 5)), torch.zeros((0,))\n\n from mmdet.core import bbox2result\n bbox_results = bbox2result(det_bboxes, det_labels, num_classes)\n return [bbox_results]\n\n ##################################################################\n\n def onnx_export(self, img, img_metas):\n\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n x = self.extract_feat(img)\n proposals = self.rpn_head.onnx_export(x, img_metas)\n return self.roi_head.onnx_export(x, proposals, img_metas)" }, { "identifier": "build_head", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_head(cfg):\n \"\"\"Build head.\"\"\"\n return HEADS.build(cfg)" }, { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "build_loss", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return LOSSES.build(cfg)" }, { "identifier": "gen_proposals_from_cfg", "path": "PointOBB/mmdet/models/detectors/P2BNet.py", "snippet": "def gen_proposals_from_cfg(gt_points, proposal_cfg, img_meta):\n base_scales = proposal_cfg['base_scales']\n base_ratios = proposal_cfg['base_ratios']\n shake_ratio = proposal_cfg['shake_ratio']\n \n if 'cut_mode' in proposal_cfg:\n cut_mode = proposal_cfg['cut_mode']\n else:\n cut_mode = 'symmetry'\n base_proposal_list = []\n proposals_valid_list = []\n for i in range(len(gt_points)):\n img_h, img_w, _ = img_meta[i]['img_shape']\n if 'base_size' in proposal_cfg:\n base = proposal_cfg['base_size']\n else:\n base = max(img_w, img_h) / 100\n \n base_proposals = []\n for scale in base_scales:\n scale = scale * base # ≈[41, 81, 161, 326, 640, 1280]\n for ratio in base_ratios:\n base_proposals.append(gt_points[i].new_tensor([[scale * ratio, scale / ratio]]))\n\n base_proposals = torch.cat(base_proposals)\n base_proposals = base_proposals.repeat((len(gt_points[i]), 1))\n base_center = torch.repeat_interleave(gt_points[i], len(base_scales) * len(base_ratios), dim=0)\n\n if shake_ratio is not None:\n base_x_l = base_center[:, 0] - shake_ratio * base_proposals[:, 0]\n base_x_r = base_center[:, 0] + shake_ratio * base_proposals[:, 0]\n base_y_t = base_center[:, 1] - shake_ratio * base_proposals[:, 1]\n base_y_d = base_center[:, 1] + shake_ratio * base_proposals[:, 1]\n if cut_mode is not None:\n base_x_l = torch.clamp(base_x_l, 1, img_w - 1)\n base_x_r = torch.clamp(base_x_r, 1, img_w - 1)\n base_y_t = torch.clamp(base_y_t, 1, img_h - 1)\n base_y_d = torch.clamp(base_y_d, 1, img_h - 1)\n\n base_center_l = torch.stack([base_x_l, base_center[:, 1]], dim=1)\n base_center_r = torch.stack([base_x_r, base_center[:, 1]], dim=1)\n base_center_t = torch.stack([base_center[:, 0], base_y_t], 
dim=1)\n base_center_d = torch.stack([base_center[:, 0], base_y_d], dim=1)\n\n shake_mode = 0\n if shake_mode == 0:\n base_proposals = base_proposals.unsqueeze(1).repeat((1, 5, 1))\n elif shake_mode == 1:\n base_proposals_l = torch.stack([((base_center[:, 0] - base_x_l) * 2 + base_proposals[:, 0]),\n base_proposals[:, 1]], dim=1)\n base_proposals_r = torch.stack([((base_x_r - base_center[:, 0]) * 2 + base_proposals[:, 0]),\n base_proposals[:, 1]], dim=1)\n base_proposals_t = torch.stack([base_proposals[:, 0],\n ((base_center[:, 1] - base_y_t) * 2 + base_proposals[:, 1])], dim=1\n )\n base_proposals_d = torch.stack([base_proposals[:, 0],\n ((base_y_d - base_center[:, 1]) * 2 + base_proposals[:, 1])], dim=1\n )\n base_proposals = torch.stack(\n [base_proposals, base_proposals_l, base_proposals_r, base_proposals_t, base_proposals_d], dim=1)\n\n base_center = torch.stack([base_center, base_center_l, base_center_r, base_center_t, base_center_d], dim=1)\n\n if cut_mode == 'symmetry':\n base_proposals[..., 0] = torch.min(base_proposals[..., 0], 2 * base_center[..., 0])\n base_proposals[..., 0] = torch.min(base_proposals[..., 0], 2 * (img_w - base_center[..., 0]))\n base_proposals[..., 1] = torch.min(base_proposals[..., 1], 2 * base_center[..., 1])\n base_proposals[..., 1] = torch.min(base_proposals[..., 1], 2 * (img_h - base_center[..., 1]))\n\n base_proposals = torch.cat([base_center, base_proposals], dim=-1)\n base_proposals = base_proposals.reshape(-1, 4)\n base_proposals = bbox_cxcywh_to_xyxy(base_proposals)\n proposals_valid = base_proposals.new_full(\n (*base_proposals.shape[:-1], 1), 1, dtype=torch.long).reshape(-1, 1)\n if cut_mode == 'clamp':\n base_proposals[..., 0:4:2] = torch.clamp(base_proposals[..., 0:4:2], 0, img_w)\n base_proposals[..., 1:4:2] = torch.clamp(base_proposals[..., 1:4:2], 0, img_h)\n proposals_valid_list.append(proposals_valid)\n if cut_mode == 'symmetry':\n proposals_valid_list.append(proposals_valid)\n elif cut_mode == 'ignore':\n img_xyxy = base_proposals.new_tensor([0, 0, img_w, img_h])\n iof_in_img = bbox_overlaps(base_proposals, img_xyxy.unsqueeze(0), mode='iof')\n proposals_valid = iof_in_img > 0.7\n proposals_valid_list.append(proposals_valid)\n elif cut_mode is None:\n proposals_valid_list.append(proposals_valid)\n base_proposal_list.append(base_proposals)\n\n return base_proposal_list, proposals_valid_list" }, { "identifier": "resize_proposal", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def resize_proposal(img_metas, generate_proposals, gt_true_bboxes, gt_bboxes_ignore, ratio = 0.5):\n \n img_meta_out = copy.deepcopy(img_metas)\n generate_proposals_out = []\n gt_true_bboxes_out = []\n gt_bboxes_ignore_out = []\n for i in range(len(img_metas)):\n h, w, c = img_metas[i]['img_shape']\n img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)\n img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)\n tmp_proposal = generate_proposals[i] * ratio\n generate_proposals_out.append(tmp_proposal)\n tmp_gt_true_bbox = gt_true_bboxes[i] * ratio\n gt_true_bboxes_out.append(tmp_gt_true_bbox)\n gt_bboxes_ignore_out.append(gt_bboxes_ignore[i]*ratio)\n return generate_proposals_out, gt_true_bboxes_out, img_meta_out, gt_bboxes_ignore_out" }, { "identifier": "resize_single_proposal", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def resize_single_proposal(generate_proposals, ratio = 0.5):\n generate_proposals_out = []\n for i in range(len(generate_proposals)):\n tmp_proposal = 
generate_proposals[i] * ratio\n generate_proposals_out.append(tmp_proposal)\n\n return generate_proposals_out" }, { "identifier": "flip_tensor", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def flip_tensor(tensor,\n img_shape: Tuple[int, int],\n direction: str = 'horizontal') -> None:\n \"\"\"Flip boxes horizontally or vertically in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n direction (str): Flip direction, options are \"horizontal\",\n \"vertical\" and \"diagonal\". Defaults to \"horizontal\"\n \"\"\"\n assert direction in ['horizontal', 'vertical', 'diagonal']\n flipped = tensor\n if direction == 'horizontal':\n flipped[..., 0] = img_shape[1] - flipped[..., 0]\n flipped[..., 4] = -flipped[..., 4]\n elif direction == 'vertical':\n flipped[..., 1] = img_shape[0] - flipped[..., 1]\n flipped[..., 4] = -flipped[..., 4]\n else:\n flipped[..., 0] = img_shape[1] - flipped[..., 0]\n flipped[..., 1] = img_shape[0] - flipped[..., 1]\n return flipped" }, { "identifier": "hboxlist2cxcywha", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def hboxlist2cxcywha(bbox_list):\n batch_bbox = []\n\n for i in range(len(bbox_list)):\n gt_box = bbox_list[i]\n # xyxy2cxcywha\n cx = (gt_box[:,0] + gt_box[:,2]) /2\n cy = (gt_box[:,1] + gt_box[:,3]) /2\n w = gt_box[:,2] - gt_box[:,0]\n h = gt_box[:,3] - gt_box[:,1]\n theta = torch.zeros_like(w, dtype=w.dtype)\n gt_box_new = torch.stack([cx, cy, w, h, theta], dim=-1)\n batch_bbox.append(gt_box_new)\n\n return batch_bbox" }, { "identifier": "merge_batch_list", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def merge_batch_list(batch_gt_bboxes, batch_proposals):\n merged_list = []\n flag = []\n\n for gt_bboxes, proposals in zip(batch_gt_bboxes, batch_proposals):\n merged_list.append(torch.cat([gt_bboxes, proposals], dim=0))\n flag.append([gt_bboxes.size(0), proposals.size(0)])\n\n return merged_list, flag" }, { "identifier": "split_batch_list", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def split_batch_list(merged_list, flags):\n out_list1 = []\n out_list2 = []\n for merged_tensor, flag in zip(merged_list, flags):\n out_list1.append(merged_tensor[:flag[0]])\n out_list2.append(merged_tensor[flag[0]:])\n\n return out_list1, out_list2" }, { "identifier": "box_iou_rotated", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def box_iou_rotated(bboxes1: torch.Tensor,\n bboxes2: torch.Tensor,\n mode: str = 'iou',\n aligned: bool = False,\n clockwise: bool = True) -> torch.Tensor:\n \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in\n (x_center, y_center, width, height, angle) format.\n\n If ``aligned`` is ``False``, then calculate the ious between each bbox\n of bboxes1 and bboxes2, otherwise the ious between each aligned pair of\n bboxes1 and bboxes2.\n\n .. note::\n The operator assumes:\n\n 1) The positive direction along x axis is left -> right.\n\n 2) The positive direction along y axis is top -> down.\n\n 3) The w border is in parallel with x axis when angle = 0.\n\n However, there are 2 opposite definitions of the positive angular\n direction, clockwise (CW) and counter-clockwise (CCW). MMCV supports\n both definitions and uses CW by default.\n\n Please set ``clockwise=False`` if you are using the CCW definition.\n\n The coordinate system when ``clockwise`` is ``True`` (default)\n\n .. 
code-block:: none\n\n 0-------------------> x (0 rad)\n | A-------------B\n | | |\n | | box h\n | | angle=0 |\n | D------w------C\n v\n y (pi/2 rad)\n\n In such coordination system the rotation matrix is\n\n .. math::\n \\\\begin{pmatrix}\n \\\\cos\\\\alpha & -\\\\sin\\\\alpha \\\\\\\\\n \\\\sin\\\\alpha & \\\\cos\\\\alpha\n \\\\end{pmatrix}\n\n The coordinates of the corner point A can be calculated as:\n\n .. math::\n P_A=\n \\\\begin{pmatrix} x_A \\\\\\\\ y_A\\\\end{pmatrix}\n =\n \\\\begin{pmatrix} x_{center} \\\\\\\\ y_{center}\\\\end{pmatrix} +\n \\\\begin{pmatrix}\\\\cos\\\\alpha & -\\\\sin\\\\alpha \\\\\\\\\n \\\\sin\\\\alpha & \\\\cos\\\\alpha\\\\end{pmatrix}\n \\\\begin{pmatrix} -0.5w \\\\\\\\ -0.5h\\\\end{pmatrix} \\\\\\\\\n =\n \\\\begin{pmatrix} x_{center}-0.5w\\\\cos\\\\alpha+0.5h\\\\sin\\\\alpha\n \\\\\\\\\n y_{center}-0.5w\\\\sin\\\\alpha-0.5h\\\\cos\\\\alpha\\\\end{pmatrix}\n\n\n The coordinate system when ``clockwise`` is ``False``\n\n .. code-block:: none\n\n 0-------------------> x (0 rad)\n | A-------------B\n | | |\n | | box h\n | | angle=0 |\n | D------w------C\n v\n y (-pi/2 rad)\n\n In such coordination system the rotation matrix is\n\n .. math::\n \\\\begin{pmatrix}\n \\\\cos\\\\alpha & \\\\sin\\\\alpha \\\\\\\\\n -\\\\sin\\\\alpha & \\\\cos\\\\alpha\n \\\\end{pmatrix}\n\n The coordinates of the corner point A can be calculated as:\n\n .. math::\n P_A=\n \\\\begin{pmatrix} x_A \\\\\\\\ y_A\\\\end{pmatrix}\n =\n \\\\begin{pmatrix} x_{center} \\\\\\\\ y_{center}\\\\end{pmatrix} +\n \\\\begin{pmatrix}\\\\cos\\\\alpha & \\\\sin\\\\alpha \\\\\\\\\n -\\\\sin\\\\alpha & \\\\cos\\\\alpha\\\\end{pmatrix}\n \\\\begin{pmatrix} -0.5w \\\\\\\\ -0.5h\\\\end{pmatrix} \\\\\\\\\n =\n \\\\begin{pmatrix} x_{center}-0.5w\\\\cos\\\\alpha-0.5h\\\\sin\\\\alpha\n \\\\\\\\\n y_{center}+0.5w\\\\sin\\\\alpha-0.5h\\\\cos\\\\alpha\\\\end{pmatrix}\n\n Args:\n boxes1 (torch.Tensor): rotated bboxes 1. It has shape (N, 5),\n indicating (x, y, w, h, theta) for each row. Note that theta is in\n radian.\n boxes2 (torch.Tensor): rotated bboxes 2. It has shape (M, 5),\n indicating (x, y, w, h, theta) for each row. Note that theta is in\n radian.\n mode (str): \"iou\" (intersection over union) or iof (intersection over\n foreground).\n clockwise (bool): flag indicating whether the positive angular\n orientation is clockwise. default True.\n `New in version 1.4.3.`\n\n Returns:\n torch.Tensor: Return the ious betweens boxes. 
If ``aligned`` is\n ``False``, the shape of ious is (N, M) else (N,).\n \"\"\"\n assert mode in ['iou', 'iof']\n mode_dict = {'iou': 0, 'iof': 1}\n mode_flag = mode_dict[mode]\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n if aligned:\n ious = bboxes1.new_zeros(rows)\n else:\n ious = bboxes1.new_zeros(rows * cols)\n if not clockwise:\n flip_mat = bboxes1.new_ones(bboxes1.shape[-1])\n flip_mat[-1] = -1\n bboxes1 = bboxes1 * flip_mat\n bboxes2 = bboxes2 * flip_mat\n bboxes1 = bboxes1.contiguous()\n bboxes2 = bboxes2.contiguous()\n ext_module.box_iou_rotated(\n bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)\n if not aligned:\n ious = ious.view(rows, cols)\n return ious" }, { "identifier": "obb2poly_np", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" } ]
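Several helpers in the context above translate between box parameterisations; `hboxlist2cxcywha`, for instance, lifts horizontal (x1, y1, x2, y2) boxes into the rotated (cx, cy, w, h, theta) format with theta = 0. A minimal tensor-level sketch of that conversion, outside the detector class (names are illustrative):

import torch

def xyxy_to_cxcywha(boxes: torch.Tensor) -> torch.Tensor:
    # boxes: (N, 4) horizontal boxes as (x1, y1, x2, y2).
    # Returns (N, 5) rotated-box parameters (cx, cy, w, h, theta),
    # with theta fixed at 0 because the inputs are axis-aligned.
    cx = (boxes[:, 0] + boxes[:, 2]) / 2
    cy = (boxes[:, 1] + boxes[:, 3]) / 2
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    theta = torch.zeros_like(w)
    return torch.stack([cx, cy, w, h, theta], dim=-1)

# e.g. a unit square centred at (0.5, 0.5)
print(xyxy_to_cxcywha(torch.tensor([[0.0, 0.0, 1.0, 1.0]])))
# tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000]])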
import copy
import torch
import numpy as np
import copy
import math
import cv2
import os
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
from mmdet.core.bbox import bbox_xyxy_to_cxcywh
from mmdet.core import bbox_cxcywh_to_xyxy
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from ..builder import build_head
from torch.nn import functional as F
from ..builder import HEADS, build_loss
from typing import Tuple, Union
from torch import Tensor
from torch.nn.functional import grid_sample
from torchvision import transforms
from .P2BNet import gen_proposals_from_cfg
from .utils import resize_proposal, resize_single_proposal, flip_tensor, hboxlist2cxcywha \
    ,merge_batch_list, split_batch_list, box_iou_rotated, obb2poly_np
11,271
cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) return cls_similarity, ins_similarity, score_similarity # def Cross_View_Sim(self, results_v1v2, gt_labels, proposals_valid_list, mode = 'scales', stage = 0): # gt_label = torch.cat(gt_labels) # half_num = len(gt_label)//2 # proposals_valid_all = torch.cat(proposals_valid_list) # half_num_vaild = len(proposals_valid_all)//2 # # with torch.no_grad(): # base_proposal_cfg = self.train_cfg.get('base_proposal',self.test_cfg.rpn) # fine_proposal_cfg = self.train_cfg.get('fine_proposal',self.test_cfg.rpn) # if mode == 'scales': # num_base_scales = len(base_proposal_cfg['base_scales']) # elif mode == 'ratios': # num_base_scales = len(base_proposal_cfg['base_ratios']) # elif mode == 'gts': # num_base_scales = len(base_proposal_cfg['base_scales']) * len(base_proposal_cfg['base_ratios']) # if stage >=1: # if isinstance(fine_proposal_cfg['base_ratios'], tuple): # num_base_scales = len(fine_proposal_cfg['base_ratios'][stage - 1]) # # shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1] # else: # num_base_scales = len(fine_proposal_cfg['base_ratios']) # # shake_ratio = fine_proposal_cfg['shake_ratio'] # cls_score_v1 = results_v1v2['cls_score'][:half_num,...] # [num_gt, num_pros, num_cls+1]) # ins_score_v1 = results_v1v2['ins_score'][:half_num,...] # proposal_vaild_v1 = proposals_valid_all[:half_num_vaild,...].reshape(half_num, -1) # proposal_vaild_v2 = proposals_valid_all[half_num_vaild:,...].reshape(half_num, -1) # proposal_vaild = proposal_vaild_v1 * proposal_vaild_v2 # if stage < 1: # cls_score_v1_prob = cls_score_v1.softmax(dim=-1) # elif stage >= 1: # cls_score_v1_prob = cls_score_v1.sigmoid() # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild[...,None] # ins_score_v1_prob = ins_score_v1.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v1_prob = F.normalize(ins_score_v1_prob, dim=1, p=1) # prob_v1 = (cls_score_v1_prob * ins_score_v1_prob).sum(dim=1) # cls_score_v2 = results_v1v2['cls_score'][half_num:,...] # ins_score_v2 = results_v1v2['ins_score'][half_num:,...] 
# if stage < 1: # cls_score_v2_prob = cls_score_v2.softmax(dim=-1) # elif stage >= 1: # cls_score_v2_prob = cls_score_v2.sigmoid() # cls_score_v2_prob = cls_score_v2_prob * proposal_vaild[...,None] # ins_score_v2_prob = ins_score_v2.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v2_prob = F.normalize(ins_score_v2_prob, dim=1, p=1) # prob_v2 = (cls_score_v2_prob * ins_score_v2_prob).sum(dim=1) # if stage >= 1: # cls_score_v1_prob_list = [] # cls_score_v2_prob_list = [] # ins_score_v1_prob_list = [] # ins_score_v2_prob_list = [] # for i in range(half_num): # cls_score_v1_prob_list.append(cls_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v2_prob_list.append(cls_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v1_prob_list.append(ins_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v2_prob_list.append(ins_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v1_prob = torch.cat(cls_score_v1_prob_list, dim=0) # cls_score_v2_prob = torch.cat(cls_score_v2_prob_list, dim=0) # ins_score_v1_prob = torch.cat(ins_score_v1_prob_list, dim=0) # ins_score_v2_prob = torch.cat(ins_score_v2_prob_list, dim=0) # cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) # # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild_v1 # ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) # cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) # ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) # cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) # ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) # score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) # return cls_similarity, ins_similarity, score_similarity def forward_train(self, img, img_metas, gt_bboxes, gt_true_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs): if self.iter_count == self.burn_in_steps1: self.roi_head.use_angle_loss = True print(f'#####iter_count1 use_angle_loss:{self.iter_count}#####') if self.construct_resize: self.construct_resize = False if self.iter_count == self.burn_in_steps2: if self.roi_head.use_angle_loss: self.roi_head.add_angle_pred_begin = True print(f'#####iter_count2 add_angle_pred_begin:{self.iter_count}#####') base_proposal_cfg = self.train_cfg.get('base_proposal', self.test_cfg.rpn) fine_proposal_cfg = self.train_cfg.get('fine_proposal', self.test_cfg.rpn) losses = dict() gt_points = [bbox_xyxy_to_cxcywh(b)[:, :2] for b in gt_bboxes] if self.stage == 0:
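The cropped code above scores each view, masks out invalid proposals, and penalises disagreement between views with 1 - cosine similarity. Below is a minimal standalone sketch of that pattern; the tensor shapes (3 ground-truth points, 2 base scales, 4 proposals per scale) are made up for illustration and are not taken from the config.

import torch
import torch.nn.functional as F

num_gt, num_scales, num_props = 3, 2, 4
score_v1 = torch.rand(num_gt, num_scales * num_props)   # per-proposal scores from view 1
score_v2 = torch.rand(num_gt, num_scales * num_props)   # per-proposal scores from view 2
valid = (torch.rand(num_gt, num_scales * num_props) > 0.2).float()

# Zero out proposals that fall outside the image, like proposal_vaild above.
score_v1 = (score_v1 * valid).reshape(num_gt, num_scales, -1)
score_v2 = (score_v2 * valid).reshape(num_gt, num_scales, -1)

# One dissimilarity value per (gt, scale) group; 0 means the two views agree.
dissim = 1 - F.cosine_similarity(score_v1, score_v2, dim=-1, eps=1e-6)
print(dissim.shape)  # torch.Size([3, 2])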
# from mmdet.datasets.utils import obb2poly_np def resize_image(inputs, resize_ratio=0.5): down_inputs = F.interpolate(inputs, scale_factor=resize_ratio, mode='nearest') return down_inputs def fine_rotate_proposals_from_cfg(pseudo_boxes, fine_proposal_cfg, img_meta, stage): gen_mode = fine_proposal_cfg['gen_proposal_mode'] # cut_mode = fine_proposal_cfg['cut_mode'] cut_mode = None if isinstance(fine_proposal_cfg['base_ratios'], tuple): base_ratios = fine_proposal_cfg['base_ratios'][stage - 1] shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1] else: base_ratios = fine_proposal_cfg['base_ratios'] shake_ratio = fine_proposal_cfg['shake_ratio'] if gen_mode == 'fix_gen': proposal_list = [] proposals_valid_list = [] for i in range(len(img_meta)): pps = [] base_boxes = pseudo_boxes[i] for ratio_w in base_ratios: for ratio_h in base_ratios: base_boxes_ = base_boxes.clone() base_boxes_[:, 2] *= ratio_w base_boxes_[:, 3] *= ratio_h pps.append(base_boxes_.unsqueeze(1)) pps_old = torch.cat(pps, dim=1) if shake_ratio is not None: pps_new = [] pps_new.append(pps_old.reshape(*pps_old.shape[0:2], -1, 5)) for ratio in shake_ratio: pps = pps_old.clone() pps_center = pps[:, :, :2] pps_wh = pps[:, :, 2:4] pps_angle = pps[:, :, 4].unsqueeze(2) pps_x_l = pps_center[:, :, 0] - ratio * pps_wh[:, :, 0] pps_x_r = pps_center[:, :, 0] + ratio * pps_wh[:, :, 0] pps_y_t = pps_center[:, :, 1] - ratio * pps_wh[:, :, 1] pps_y_d = pps_center[:, :, 1] + ratio * pps_wh[:, :, 1] pps_center_l = torch.stack([pps_x_l, pps_center[:, :, 1]], dim=-1) pps_center_r = torch.stack([pps_x_r, pps_center[:, :, 1]], dim=-1) pps_center_t = torch.stack([pps_center[:, :, 0], pps_y_t], dim=-1) pps_center_d = torch.stack([pps_center[:, :, 0], pps_y_d], dim=-1) pps_center = torch.stack([pps_center_l, pps_center_r, pps_center_t, pps_center_d], dim=2) pps_wh = pps_wh.unsqueeze(2).expand(pps_center.shape) pps_angle = pps_angle.unsqueeze(2).expand((pps_center.size()[0], pps_center.size()[1], pps_center.size()[2], 1)) pps = torch.cat([pps_center, pps_wh, pps_angle], dim=-1) pps = pps.reshape(pps.shape[0], -1, 5) pps_new.append(pps.reshape(*pps_old.shape[0:2], -1, 5)) pps_new = torch.cat(pps_new, dim=2) else: pps_new = pps_old h, w, _ = img_meta[i]['img_shape'] if cut_mode is 'clamp': pps_new[..., 0:4:2] = torch.clamp(pps_new[..., 0:4:2], 0, w) pps_new[..., 1:4:2] = torch.clamp(pps_new[..., 1:4:2], 0, h) proposals_valid_list.append(pps_new.new_full( (*pps_new.shape[0:3], 1), 1, dtype=torch.long).reshape(-1, 1)) else: rot_theta = base_boxes[:,-1].mean() img_xywh = pps_new.new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta) iof_in_img = box_iou_rotated(pps_new.reshape(-1, 5), img_xywh.unsqueeze(0), mode='iof') proposals_valid = iof_in_img > 0.8 proposals_valid_list.append(proposals_valid) proposal_list.append(pps_new.reshape(-1, 5)) return proposal_list, proposals_valid_list def gen_rotate_negative_proposals(gt_points, proposal_cfg, aug_generate_proposals, img_meta): num_neg_gen = proposal_cfg['gen_num_neg'] if num_neg_gen == 0: return None, None neg_proposal_list = [] neg_weight_list = [] device = gt_points[0].device for i in range(len(gt_points)): pos_box = aug_generate_proposals[i] h, w, _ = img_meta[i]['img_shape'] x1 = -0.2 * w + torch.rand(num_neg_gen) * (1.2 * w) y1 = -0.2 * h + torch.rand(num_neg_gen) * (1.2 * h) x2 = x1 + torch.rand(num_neg_gen) * (1.2 * w - x1) y2 = y1 + torch.rand(num_neg_gen) * (1.2 * h - y1) neg_theta = torch.ones_like(x1)*(pos_box[:,-1].mean().cpu()) neg_bboxes = torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2 
- x1, y2 - y1, neg_theta], dim=1).to(device) iou = box_iou_rotated(neg_bboxes, pos_box) neg_weight = ((iou < 0.3).sum(dim=1) == iou.shape[1]) neg_proposal_list.append(neg_bboxes) neg_weight_list.append(neg_weight) return neg_proposal_list, neg_weight_list def resize_rotate_proposal(img_metas, batch_gt_bboxes, batch_proposals, gt_true_bboxes, gt_bboxes_ignore, ratio = 0.5): ''' batch_gt_bboxes_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a] batch_proposals_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a] ''' img_meta_out = copy.deepcopy(img_metas) batch_gt_bboxes_out = [] batch_proposals_out =[] gt_true_bboxes_out = [] gt_bboxes_ignore_out = [] for i in range(len(img_metas)): h, w, c = img_metas[i]['img_shape'] img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c) img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c) tmp_gt_bboxes = batch_gt_bboxes[i].clone() tmp_gt_bboxes[:,:4] = tmp_gt_bboxes[:,:4] * ratio batch_gt_bboxes_out.append(tmp_gt_bboxes) tmp_proposal = batch_proposals[i].clone() tmp_proposal[:,:4] = tmp_proposal[:,:4] * ratio batch_proposals_out.append(tmp_proposal) tmp_gt_true_bbox = gt_true_bboxes[i].clone() tmp_gt_true_bbox[:,:4] = tmp_gt_true_bbox[:,:4] * ratio gt_true_bboxes_out.append(tmp_gt_true_bbox) tmp_gt_bboxes_ignore = gt_bboxes_ignore[i].clone() if gt_bboxes_ignore[i].size(0) != 0: tmp_gt_bboxes_ignore[:,:,:4] = tmp_gt_bboxes_ignore[:,:4] * ratio gt_bboxes_ignore_out.append(tmp_gt_bboxes_ignore) return img_meta_out, batch_gt_bboxes_out, batch_proposals_out, gt_true_bboxes_out, gt_bboxes_ignore_out @DETECTORS.register_module() class PointOBB(TwoStageDetector): def __init__(self, backbone, roi_head, train_cfg, test_cfg, construct_view = True, construct_resize = False, loss_diff_view=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0), crop_size = (1024, 1024), padding = 'reflection', view_range: Tuple[float, float] = (0.25, 0.75), bbox_head=None, neck=None, pretrained=None, init_cfg=None): super(PointOBB, self).__init__( backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) self.num_stages = roi_head.num_stages self.stage = 0 print(f'========={self.stage}===========') if bbox_head is not None: self.with_bbox_head = True self.bbox_head = build_head(bbox_head) self.crop_size = crop_size self.padding = padding self.view_range = view_range self.loss_diff_view = build_loss(loss_diff_view) self.construct_view = construct_view self.construct_resize = construct_resize if train_cfg is not None: self.iter_count = train_cfg.get("iter_count") self.burn_in_steps1 = train_cfg.get("burn_in_steps1") self.burn_in_steps2 = train_cfg.get("burn_in_steps2") def rotate_crop( self, batch_inputs: Tensor, rot: float = 0., size: Tuple[int, int] = (768, 768), batch_gt_instances = None, padding: str = 'reflection'): """ Args: batch_inputs (Tensor): Input images of shape (N, C, H, W). These should usually be mean centered and std scaled. rot (float): Angle of view rotation. Defaults to 0. size (tuple[int]): Crop size from image center. Defaults to (768, 768). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. padding (str): Padding method of image black edge. Defaults to 'reflection'. 
Returns: Processed batch_inputs (Tensor) and batch_gt_instances (list[:obj:`InstanceData`]) """ device = batch_inputs.device n, c, h, w = batch_inputs.shape size_h, size_w = size crop_h = (h - size_h) // 2 crop_w = (w - size_w) // 2 if rot != 0: cosa, sina = math.cos(rot), math.sin(rot) tf = batch_inputs.new_tensor([[cosa, -sina], [sina, cosa]], dtype=torch.float) x_range = torch.linspace(-1, 1, w, device=device) y_range = torch.linspace(-1, 1, h, device=device) y, x = torch.meshgrid(y_range, x_range) grid = torch.stack([x, y], -1).expand([n, -1, -1, -1]) grid = grid.reshape(-1, 2).matmul(tf).view(n, h, w, 2) # rotate batch_inputs = grid_sample( batch_inputs, grid, 'bilinear', padding, align_corners=True) if batch_gt_instances is not None: for i, gt_instances in enumerate(batch_gt_instances): gt_bboxes = gt_instances xy, wh, a = gt_bboxes[..., :2], gt_bboxes[ ..., 2:4], gt_bboxes[..., [4]] ctr = tf.new_tensor([[w / 2, h / 2]]) xy = (xy - ctr).matmul(tf.T) + ctr a = a + rot rot_gt_bboxes = torch.cat([xy, wh, a], dim=-1) batch_gt_instances[i] = rot_gt_bboxes batch_inputs = batch_inputs[..., crop_h:crop_h + size_h, crop_w:crop_w + size_w] if batch_gt_instances is None: return batch_inputs else: # rot == 0 for i, gt_instances in enumerate(batch_gt_instances): gt_bboxes = gt_instances xy, wh, a = gt_bboxes[..., :2], gt_bboxes[..., 2:4], gt_bboxes[..., [4]] xy = xy - xy.new_tensor([[crop_w, crop_h]]) crop_gt_bboxes = torch.cat([xy, wh, a], dim=-1) batch_gt_instances[i] = crop_gt_bboxes return batch_inputs, batch_gt_instances def construct_Rview(self, img, generate_proposals_0, gt_bboxes, img_metas, gt_labels, gt_true_bboxes, gt_bboxes_ignore, proposals_valid_list_0): img_ori = img.clone() # 1) # Crop original images and gts batch_gt_bboxes = hboxlist2cxcywha(gt_bboxes) batch_proposals = hboxlist2cxcywha(generate_proposals_0) batch_instances_all, interval_flag = merge_batch_list(batch_gt_bboxes, batch_proposals) img, batch_instances_all = self.rotate_crop(img, 0, self.crop_size, batch_instances_all, self.padding) offset_gt = 1 offset = 1 for i, img_meta in enumerate(img_metas): img_meta['gt_bid'] = torch.arange(0, interval_flag[i][0], 1, device=batch_instances_all[i].device) + offset_gt + 0.2 offset_gt += interval_flag[i][0] img_meta['bid'] = torch.arange(0, interval_flag[i][1], 1, device=batch_instances_all[i].device) + offset + 0.2 offset += interval_flag[i][1] # 2) # Generate rotated images and gts rot = math.pi * ( torch.rand(1, device=img.device) * (self.view_range[1] - self.view_range[0]) + self.view_range[0]) batch_instance_rot = copy.deepcopy(batch_instances_all) img_metas_rot = copy.deepcopy(img_metas) img_rot, batch_instance_rot = self.rotate_crop( img, rot, self.crop_size, batch_instance_rot, self.padding) offset_gt = 1 offset = 1 for i, img_meta in enumerate(img_metas_rot): img_meta['gt_bid'] = torch.arange(0, interval_flag[i][0], 1, device=batch_instance_rot[i].device) + offset_gt + 0.4 offset_gt += interval_flag[i][0] img_meta['bid'] = torch.arange(0, interval_flag[i][1], 1, device=batch_instance_rot[i].device) + offset + 0.4 offset += interval_flag[i][1] # 3) # Generate flipped images and gts img_flp = transforms.functional.vflip(img) batch_instances_flp = copy.deepcopy(batch_instances_all) img_metas_flp = copy.deepcopy(img_metas) offset_gt = 1 offset = 1 for i, img_meta in enumerate(img_metas_flp): batch_instances_flp[i] = flip_tensor(batch_instances_flp[i], img.shape[2:4], 'vertical' ) img_meta['gt_bid'] = torch.arange(0, interval_flag[i][0], 1, 
device=batch_instances_flp[i].device) + offset_gt + 0.6 offset_gt += interval_flag[i][0] img_meta['bid'] = torch.arange(0, interval_flag[i][1], 1, device=batch_instances_flp[i].device) + offset + 0.6 offset += interval_flag[i][1] # 4) # Concat original/rotated/flipped images and gts batch_gt_bboxes, batch_proposals = split_batch_list(batch_instances_all, interval_flag) batch_gt_bboxes_rot, batch_proposals_rot = split_batch_list(batch_instance_rot, interval_flag) batch_gt_bboxes_flp, batch_proposals_flp = split_batch_list(batch_instances_flp, interval_flag) proposals_valid_list_rot = [] for v in range(len(proposals_valid_list_0)): rot_theta = batch_proposals_rot[v][:,-1].mean() w,h,_ = img_metas[v]['img_shape'] img_xywha = batch_proposals_rot[v].new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta) iof_in_img = box_iou_rotated(batch_proposals_rot[v], img_xywha.unsqueeze(0), mode='iof') # iof_in_img = bbox_overlaps(pps_new.reshape(-1, 4), img_xyxy.unsqueeze(0), mode='iof') proposals_valid = iof_in_img > 0.8 proposals_valid_list_rot.append(proposals_valid) img_ori, batch_instances_gt_true = self.rotate_crop(img_ori, 0, self.crop_size,gt_true_bboxes, self.padding) batch_instances_gt_true_rot = copy.deepcopy(batch_instances_gt_true) _, batch_instances_gt_true_rot = self.rotate_crop(img_ori, rot, self.crop_size, batch_instances_gt_true_rot, self.padding) batch_instances_gt_true_flp = copy.deepcopy(batch_instances_gt_true) for i, img_meta in enumerate(img_metas_flp): batch_instances_gt_true_flp[i] = flip_tensor(batch_instances_gt_true_flp[i], img_ori.shape[2:4], 'vertical' ) batch_gt_bboxes_all = [] batch_proposals_all = [] img_metas_all = [] gt_true_bboxes_all = [] proposals_valid_list_all = [] gt_labels_all = gt_labels + gt_labels gt_bboxes_ignore_all = gt_bboxes_ignore + gt_bboxes_ignore if torch.rand(1) < 0.95: img_inputs_all = torch.cat( (img, img_rot)) for gt_box in batch_gt_bboxes + batch_gt_bboxes_rot: batch_gt_bboxes_all.append(gt_box) for proposal in batch_proposals + batch_proposals_rot: batch_proposals_all.append(proposal) for tmp_img_metas in img_metas + img_metas_rot: img_metas_all.append(tmp_img_metas) for gt_true in batch_instances_gt_true + batch_instances_gt_true_rot: gt_true_bboxes_all.append(gt_true) for proposal_valid in proposals_valid_list_0 + proposals_valid_list_rot: proposals_valid_list_all.append(proposal_valid) else: img_inputs_all = torch.cat( (img, img_flp)) for gt_box in batch_gt_bboxes + batch_gt_bboxes_flp: batch_gt_bboxes_all.append(gt_box) for proposal in batch_proposals + batch_proposals_flp: batch_proposals_all.append(proposal) for tmp_img_metas in img_metas + img_metas_flp: img_metas_all.append(tmp_img_metas) for gt_true in batch_instances_gt_true + batch_instances_gt_true_flp: gt_true_bboxes_all.append(gt_true) for proposal_valid in proposals_valid_list_0 + proposals_valid_list_0: proposals_valid_list_all.append(proposal_valid) return (img_inputs_all, batch_gt_bboxes_all, batch_proposals_all, img_metas_all, gt_labels_all, gt_true_bboxes_all, gt_bboxes_ignore_all, proposals_valid_list_all) def Cross_View_Diff_Sim(self, results_v1, results_v2, gt_labels, proposals_valid, double_view, mode = 'scales', stage = 0): gt_label = torch.cat(gt_labels) base_proposal_cfg = self.train_cfg.get('base_proposal',self.test_cfg.rpn) fine_proposal_cfg = self.train_cfg.get('fine_proposal',self.test_cfg.rpn) if mode == 'scales': num_base_scales = len(base_proposal_cfg['base_scales']) elif mode == 'ratios': num_base_scales = len(base_proposal_cfg['base_ratios']) elif mode == 
'gts': num_base_scales = len(base_proposal_cfg['base_scales']) * len(base_proposal_cfg['base_ratios']) if stage >=1: if isinstance(fine_proposal_cfg['base_ratios'], tuple): num_base_scales = len(fine_proposal_cfg['base_ratios'][stage - 1]) else: num_base_scales = len(fine_proposal_cfg['base_ratios']) if not double_view: v1_half_num = len(results_v1['cls_score']) else: v1_half_num = len(results_v1['cls_score'])//2 cls_score_v1 = results_v1['cls_score'][:v1_half_num,...] ins_score_v1 = results_v1['ins_score'][:v1_half_num,...] # 取二者并集才是有效共同部分 proposal_vaild = torch.cat(proposals_valid).reshape(cls_score_v1.size(0),-1,1) if stage < 1: cls_score_v1_prob = cls_score_v1.softmax(dim=-1) elif stage >= 1: cls_score_v1_prob = cls_score_v1.sigmoid() cls_score_v1_prob = cls_score_v1_prob * proposal_vaild ins_score_v1_prob = ins_score_v1.softmax(dim=1) * proposal_vaild # cls_score_v1_prob = cls_score_v1_prob # ins_score_v1_prob = ins_score_v1.softmax(dim=1) ins_score_v1_prob = F.normalize(ins_score_v1_prob, dim=1, p=1) prob_v1 = (cls_score_v1_prob * ins_score_v1_prob).sum(dim=1) cls_score_v2 = results_v2['cls_score'] ins_score_v2 = results_v2['ins_score'] if stage < 1: cls_score_v2_prob = cls_score_v2.softmax(dim=-1) elif stage >= 1: cls_score_v2_prob = cls_score_v2.sigmoid() cls_score_v2_prob = cls_score_v2_prob * proposal_vaild ins_score_v2_prob = ins_score_v2.softmax(dim=1) * proposal_vaild ins_score_v2_prob = F.normalize(ins_score_v2_prob, dim=1, p=1) prob_v2 = (cls_score_v2_prob * ins_score_v2_prob).sum(dim=1) if stage>=1: cls_score_v1_prob_list = [] cls_score_v2_prob_list = [] ins_score_v1_prob_list = [] ins_score_v2_prob_list = [] for i in range(v1_half_num): cls_score_v1_prob_list.append(cls_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) cls_score_v2_prob_list.append(cls_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) ins_score_v1_prob_list.append(ins_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) ins_score_v2_prob_list.append(ins_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) cls_score_v1_prob = torch.cat(cls_score_v1_prob_list, dim=0) cls_score_v2_prob = torch.cat(cls_score_v2_prob_list, dim=0) ins_score_v1_prob = torch.cat(ins_score_v1_prob_list, dim=0) ins_score_v2_prob = torch.cat(ins_score_v2_prob_list, dim=0) cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) return cls_similarity, ins_similarity, score_similarity # def Cross_View_Sim(self, results_v1v2, gt_labels, proposals_valid_list, mode = 'scales', stage = 0): # gt_label = torch.cat(gt_labels) # half_num = len(gt_label)//2 # proposals_valid_all = torch.cat(proposals_valid_list) # half_num_vaild = len(proposals_valid_all)//2 # # with torch.no_grad(): # base_proposal_cfg = self.train_cfg.get('base_proposal',self.test_cfg.rpn) # fine_proposal_cfg = self.train_cfg.get('fine_proposal',self.test_cfg.rpn) # if mode == 'scales': # num_base_scales = len(base_proposal_cfg['base_scales']) # elif mode == 'ratios': # num_base_scales = len(base_proposal_cfg['base_ratios']) 
# elif mode == 'gts': # num_base_scales = len(base_proposal_cfg['base_scales']) * len(base_proposal_cfg['base_ratios']) # if stage >=1: # if isinstance(fine_proposal_cfg['base_ratios'], tuple): # num_base_scales = len(fine_proposal_cfg['base_ratios'][stage - 1]) # # shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1] # else: # num_base_scales = len(fine_proposal_cfg['base_ratios']) # # shake_ratio = fine_proposal_cfg['shake_ratio'] # cls_score_v1 = results_v1v2['cls_score'][:half_num,...] # [num_gt, num_pros, num_cls+1]) # ins_score_v1 = results_v1v2['ins_score'][:half_num,...] # proposal_vaild_v1 = proposals_valid_all[:half_num_vaild,...].reshape(half_num, -1) # proposal_vaild_v2 = proposals_valid_all[half_num_vaild:,...].reshape(half_num, -1) # proposal_vaild = proposal_vaild_v1 * proposal_vaild_v2 # if stage < 1: # cls_score_v1_prob = cls_score_v1.softmax(dim=-1) # elif stage >= 1: # cls_score_v1_prob = cls_score_v1.sigmoid() # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild[...,None] # ins_score_v1_prob = ins_score_v1.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v1_prob = F.normalize(ins_score_v1_prob, dim=1, p=1) # prob_v1 = (cls_score_v1_prob * ins_score_v1_prob).sum(dim=1) # cls_score_v2 = results_v1v2['cls_score'][half_num:,...] # ins_score_v2 = results_v1v2['ins_score'][half_num:,...] # if stage < 1: # cls_score_v2_prob = cls_score_v2.softmax(dim=-1) # elif stage >= 1: # cls_score_v2_prob = cls_score_v2.sigmoid() # cls_score_v2_prob = cls_score_v2_prob * proposal_vaild[...,None] # ins_score_v2_prob = ins_score_v2.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v2_prob = F.normalize(ins_score_v2_prob, dim=1, p=1) # prob_v2 = (cls_score_v2_prob * ins_score_v2_prob).sum(dim=1) # if stage >= 1: # cls_score_v1_prob_list = [] # cls_score_v2_prob_list = [] # ins_score_v1_prob_list = [] # ins_score_v2_prob_list = [] # for i in range(half_num): # cls_score_v1_prob_list.append(cls_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v2_prob_list.append(cls_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v1_prob_list.append(ins_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v2_prob_list.append(ins_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v1_prob = torch.cat(cls_score_v1_prob_list, dim=0) # cls_score_v2_prob = torch.cat(cls_score_v2_prob_list, dim=0) # ins_score_v1_prob = torch.cat(ins_score_v1_prob_list, dim=0) # ins_score_v2_prob = torch.cat(ins_score_v2_prob_list, dim=0) # cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) # # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild_v1 # ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) # cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) # ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) # cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) # ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) # score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) # return cls_similarity, ins_similarity, score_similarity def forward_train(self, img, img_metas, gt_bboxes, gt_true_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs): if self.iter_count == self.burn_in_steps1: self.roi_head.use_angle_loss = True print(f'#####iter_count1 use_angle_loss:{self.iter_count}#####') if 
self.construct_resize: self.construct_resize = False if self.iter_count == self.burn_in_steps2: if self.roi_head.use_angle_loss: self.roi_head.add_angle_pred_begin = True print(f'#####iter_count2 add_angle_pred_begin:{self.iter_count}#####') base_proposal_cfg = self.train_cfg.get('base_proposal', self.test_cfg.rpn) fine_proposal_cfg = self.train_cfg.get('fine_proposal', self.test_cfg.rpn) losses = dict() gt_points = [bbox_xyxy_to_cxcywh(b)[:, :2] for b in gt_bboxes] if self.stage == 0:
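rotate_crop in the file above rotates box centers around the image center with the same matrix used to resample the image, then shifts each box angle by the rotation. The following standalone sketch isolates that box transform; the rotation angle, image size, and sample box are invented for illustration.

import math
import torch

rot, w, h = math.pi / 6, 1024, 1024
cosa, sina = math.cos(rot), math.sin(rot)
tf = torch.tensor([[cosa, -sina], [sina, cosa]])

boxes = torch.tensor([[300.0, 400.0, 80.0, 40.0, 0.1]])  # (cx, cy, w, h, theta)
xy, wh, a = boxes[..., :2], boxes[..., 2:4], boxes[..., 4:]

ctr = torch.tensor([[w / 2, h / 2]])
xy = (xy - ctr).matmul(tf.T) + ctr  # rotate centers around the image center
a = a + rot                         # box angles shift by the same rotation
print(torch.cat([xy, wh, a], dim=-1))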
generate_proposals_0, proposals_valid_list_0 = gen_proposals_from_cfg(gt_points, base_proposal_cfg,
5
2023-11-20 07:50:12+00:00
16k
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_TOKEN)\n else:\n env_file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\"token.env\")\n load_dotenv(env_file_path)\n HUGGINGFACE_HUB_TOKEN = os.getenv('HUGGINGFACE_HUB_TOKEN')\n print(\"d2:\", HUGGINGFACE_HUB_TOKEN)\n login(token=HUGGINGFACE_HUB_TOKEN)\n os.environ[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGINGFACE_HUB_TOKEN" }, { "identifier": "HuggingfaceInference", "path": "src/finetune/huggingface_inference.py", "snippet": "class HuggingfaceInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,using_4bit_quantization=True,low_cpu_mem_usage=False):\n self.model = None\n self.tokenizer = None\n self.hg_model = None\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n self.bnb_config = None\n if using_4bit_quantization:\n self.bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n self.low_cpu_mem_usage = low_cpu_mem_usage\n def load_model(self):\n try:\n \n if self.model_path.split(os.sep)[-1].rfind(\"llama\") >=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n if not self.tokenizer.pad_token:\n if self.model_path.split(os.sep)[-1].lower().rfind(\"gpt2\")>=0:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n else:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.hg_model.resize_token_embeddings(len(self.tokenizer))\n\n except Exception as e:\n return -1, e\n self.model = pipeline(\n \"text-generation\",\n model=self.hg_model,\n tokenizer=self.tokenizer,\n max_new_tokens = self.max_new_tokens,\n temperature=self.temperature,\n top_p=self.top_p,top_k=self.top_k,do_sample=True,\n return_full_text=False,\n repetition_penalty=self.repetition_penalty,\n # return_dict_in_generate = True\n )\n return 0, \"\"\n def infer(self ,input):\n output = self.model(input)\n return output[0]['generated_text'] if output else None\n def free_memory(self):\n if self.hg_model:\n del self.hg_model\n self.hg_model = None\n if self.tokenizer:\n del self.tokenizer\n self.tokenizer = None\n if self.model:\n del 
self.model\n self.model = None" }, { "identifier": "LlamaCppInference", "path": "src/finetune/llama_cpp_inference.py", "snippet": "class LlamaCppInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,n_gpu_layers=35, n_ctx=4048,verbose=False):\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prefix1 = \"\"\n self.prefix2 = \"\"\n self.model = None\n\n def load_model(self):\n load_model_status = 0\n msg = None\n try:\n self.model = LlamaCpp(model_path=self.model_path, n_gpu_layers=35, n_ctx=4096,max_tokens=self.max_new_tokens, temperature=self.temperature,\n verbose=False, top_k=self.top_k, top_p=self.top_p,repeat_penalty=self.repetition_penalty)\n except Exception as e:\n load_model_status = -1\n msg = e\n return load_model_status, msg\n def infer(self ,input):\n return self.model(input)\n\n\n def free_memory(self):\n if self.model:\n del self.model\n self.model = None" }, { "identifier": "QAWithRAG", "path": "src/rag/qa_with_rag.py", "snippet": "class QAWithRAG():\n def __init__(self ,config: dict ={}):\n self.text_splitter = None\n self.embedding_function = None\n self.vectorstore = None\n self.retriever = None\n self.chat_llm = None\n\n self.chat_history =[]\n # self.persist_directory = \"./chroma_db\"\n self.persist_directory = None\n self.qa = None\n self.langchain_llm = None\n def free_memory(self):\n if self.chat_llm:\n self.chat_llm.free_memory()\n del self.chat_llm\n self.chat_llm = None\n if self.langchain_llm:\n del self.langchain_llm\n self.langchain_llm = None\n if self.qa:\n del self.qa\n self.qa = None\n\n\n def get_text_splitter(self ,chunk_size ,chunk_overlap ,separators):\n self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len,\n separators=separators)\n def load_embedding_model(self ,model_path=\"\"):\n self.embedding_function = HuggingFaceEmbeddings(model_name=model_path ,model_kwargs = {'device': 'cpu'})\n def load_chat_model(self ,model_path,using_4bit_quantization,low_cpu_mem_usage,\n max_new_tokens, temperature, top_k, top_p, repeat_penalty\n ):\n self.set_prompt_template(model_path)\n load_model_status = 0\n if model_path.split('.')[-1] == \"gguf\":\n self.chat_llm = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens, temperature=temperature,\n top_k=top_k, top_p=top_p, repetition_penalty=repeat_penalty)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = self.chat_llm.model\n else:\n self.chat_llm = HuggingfaceInference(model_path, max_new_tokens, temperature, top_p, top_k, repeat_penalty, using_4bit_quantization,low_cpu_mem_usage)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = HuggingFacePipeline(pipeline=self.chat_llm.model)\n\n return load_model_status, msg\n\n #\n def get_document_data(self ,doc_path):\n self.chat_history = []\n self.chat_history.clear()\n self.doc_ext = doc_path.split('.')[-1]\n if self.doc_ext == \"txt\":\n loader = TextLoader(doc_path, encoding='utf8')\n elif self.doc_ext == \"pdf\":\n loader = PyPDFLoader(doc_path)\n elif self.doc_ext == \"docx\":\n loader = Docx2txtLoader(doc_path)\n else:\n raise ValueError(f\"Unsupported format: {self.doc_ext}\")\n data = loader.load()\n return data\n def add_document_to_vector_store(self, doc_path ,search_top_k 
,search_score_threshold):\n data = self.get_document_data(doc_path)\n data = self.text_splitter.split_documents(data)\n try:\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n except InvalidDimensionException:\n Chroma().delete_collection()\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n self.set_retriever(search_top_k ,search_score_threshold)\n\n def set_retriever(self ,search_top_k ,score_threshold):\n self.retriever = self.vectorstore.as_retriever(search_type='similarity_score_threshold',\n search_kwargs={'k': search_top_k, \"score_threshold\": score_threshold})\n def set_prompt_template(self ,chat_model_path):\n\n if chat_model_path.lower().find(\"mistral\") >= 0 and chat_model_path.lower().find(\"instruct\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"llama\") >= 0 and chat_model_path.lower().find(\"chat\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"zephyr\") >= 0:\n prompt_template = \"\"\"<|user|>\\n Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: </s><|assistant|>\\n\"\"\"\n else:\n prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer:\"\"\"\n\n self.prompt_template = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n def generate(self, question):\n self.chat_history = []\n if self.retriever:\n\n chain_type_kwargs = {\"prompt\": self.prompt_template ,\"verbose\": False}\n self.qa = RetrievalQA.from_chain_type(llm=self.langchain_llm, chain_type=\"stuff\", retriever=self.retriever,\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs)\n result = self.qa({\"query\": question}, return_only_outputs=True)\n retrieved_txt_list = []\n if len(result['source_documents'] ) >0:\n if self.doc_ext == \"txt\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"pdf\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"docx\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n answer = result['result']\n else:\n answer = \"Sorry, I can't find any relevant information in document. 
\" + result['result']\n return answer, retrieved_txt_list\n else:\n return \"\", retrieved_txt_list" }, { "identifier": "read_yaml", "path": "src/utils/common.py", "snippet": "def read_yaml(yaml_path):\n with open(yaml_path) as f1:\n try:\n data = yaml.safe_load(f1)\n return data\n except yaml.YAMLError as e:\n raise ValueError(f'Error loading yaml file: {e}')" }, { "identifier": "get_first_row_from_dataset", "path": "src/utils/common.py", "snippet": "def get_first_row_from_dataset(dataset_path):\n if os.path.exists(os.path.join(dataset_path, \"dataset_dict.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_infos.json\")):\n dataset = datasets.load_dataset(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_info.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n else:\n raise ValueError(\n f'Invalid Dataset format {dataset_path}.')\n try:\n split_list = list(dataset.keys())\n except:\n split_list = [\"train\"]\n new_split_list= [\"\",\"\",\"\"]\n for split in split_list:\n if split.find(\"train\") >= 0:\n new_split_list[0] = split\n elif split.find(\"val\") >= 0:\n new_split_list[1] = split\n elif split.find(\"test\") >= 0:\n new_split_list[2] = split\n\n return dataset[new_split_list[0]][0],new_split_list" }, { "identifier": "get_runs_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_runs_model_names_from_dir(root_dir):\n\n run_names = os.listdir(root_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(root_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n model_bin_path = os.path.exists(\n os.path.join(root_dir,\n run_name, \"output_model\", run_output_model_name, \"ori\",\n \"pytorch_model.bin\"))\n if run_output_model_name.find(\"merged_\") >= 0 and model_bin_path:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n return runs_output_model" }, { "identifier": "get_hg_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_from_dir(root_dir):\n model_names = os.listdir(root_dir)\n model_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n return model_names" }, { "identifier": "get_hg_model_names_and_gguf_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_and_gguf_from_dir(hg_model_root_dir,runs_model_root_dir):\n output = []\n runs_gguf_files = glob.glob(os.path.join(runs_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"*.gguf\"), recursive=False)\n root_model_hg_dir0 = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"config.json\"),recursive=False)\n root_model_hg_dir1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"config.json\"), recursive=False)\n runs_hg_dir = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"config.json\"),recursive=False)\n runs_gguf_files.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_gguf_files.sort(key=lambda file: 
os.path.getmtime(file), reverse=True)\n root_model_gguf_files1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir0.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n runs_hg_dir.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n\n for file in runs_gguf_files:\n file_pos = file.find(\"runs\")\n output.append(file[file_pos:])\n for file in root_model_gguf_files:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_gguf_files1:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_hg_dir0:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in root_model_hg_dir1:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in runs_hg_dir:\n file_pos = file.find(\"runs\")+len(\"runs\")+1\n output.append(file[file_pos:])\n return output" }, { "identifier": "validate_model_path", "path": "src/utils/common.py", "snippet": "def validate_model_path(model_name):\n if not model_name:\n return False,\"\"\n home_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n base_model_config_path1 = os.path.join(home_dir, \"models\", model_name)\n base_model_config_path2 = os.path.join(base_model_config_path1, \"config.json\")\n run_model_config_path1 = os.path.join(home_dir, \"runs\", model_name)\n run_model_config_path2 = os.path.join(run_model_config_path1, \"config.json\")\n if os.path.exists(base_model_config_path1) and base_model_config_path1.endswith(\".gguf\"):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path1) and run_model_config_path1.endswith(\".gguf\") :\n return True,run_model_config_path1\n if os.path.exists(base_model_config_path2):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path2):\n return True,run_model_config_path1\n return False,\"\"" }, { "identifier": "get_runs_models", "path": "src/utils/common.py", "snippet": "def get_runs_models():\n training_runs_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'runs')\n run_names = os.listdir(training_runs_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file)))\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(training_runs_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n if run_output_model_name.find(\"merged_\") >= 0:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n runs_output_model = runs_output_model[::-1]\n return runs_output_model" }, { "identifier": "get_model_type", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_type(model_path):\n if model_path:\n if model_path.lower().find(\"mistral\") >= 0 and model_path.lower().find(\"instruct\") >= 0:\n model_type = \"mistral\"\n elif model_path.lower().find(\"llama\") >= 0 and model_path.lower().find(\"chat\") >= 0:\n model_type = \"llama2\"\n elif model_path.lower().find(\"zephyr\") >= 0:\n model_type = \"zephyr\"\n else:\n model_type = \"other model\"\n 
else:\n model_type = \"other model\"\n return model_type" }, { "identifier": "get_chat_history_prompt", "path": "src/utils/chat_prompts.py", "snippet": "def get_chat_history_prompt(chat_history,model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt = ','.join(chat_history[:-2])\n prompt = prompt + chat_history[-2]\n elif model_type == \"llama2\":\n prompt = format_chat_history_prompt_for_llama2_7b_chat(chat_history)\n elif model_type == \"zephyr\":\n prompt = format_chat_history_prompt_for_zephyr_7b_instruct(chat_history)\n elif model_type == \"mistral\":\n prompt = format_chat_history_prompt_for_mistral_7b_instruct(chat_history)\n return prompt" }, { "identifier": "get_model_prompt_template", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_prompt_template(model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n elif model_type == \"llama2\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n elif model_type == \"zephyr\":\n prompt_template = PromptTemplate.from_template(\n \"<|user|>\\n{question}</s><|assistant|>\\n\"\n )\n elif model_type == \"mistral\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n return prompt_template" }, { "identifier": "download_model", "path": "src/utils/download_model.py", "snippet": "class ModelDownloader:\n def __init__(self, max_retries=5):\n def sanitize_model_and_branch_names(self, model, branch):\n def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):\n def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):\n def get_single_file(self, url, output_folder, start_from_scratch=False):\n def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=4):\n def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):\n def check_model_files(self, model, branch, links, sha256, output_folder):" }, { "identifier": "QloraTrainer", "path": "src/finetune/qlora_trainer.py", "snippet": "class QloraTrainer(PeftTrainer):\n\n def __init__(self, config: dict):\n self.config = config\n self.tokenizer = None\n self.base_model = None\n self.merged_model = None\n self.dataset = None\n self.fused_model = None\n self.train_dataset = None\n self.val_dataset = None\n self.logging_callback = self.LoggingCallbacks()\n print(\"config:\",config)\n def load_dataset(self):\n if self.config[\"dataset\"][\"hg_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_infos.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset= datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_val_dataset\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_dict.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = 
datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_val_dataset\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"hg_dataset_dir\"]}.')\n else:\n\n if self.config[\"dataset\"][\"local_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_infos.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_val_set\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_dict.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_val_set\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"local_dataset_dir\"]}.')\n\n\n if self.config[\"dataset\"][\"max_length\"] == \"Model Max Length\":\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"mistral\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"zephyr\") >= 0:\n context_window = 1024*4\n else:\n context_window = self.tokenizer.model_max_length\n if self.tokenizer.model_max_length == int(1e30):\n context_window = 1024\n else:\n context_window = self.config[\"dataset\"][\"max_length\"]\n print(\"context_window:\",context_window)\n self.train_dataset = self.train_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n # padding=True\n ))\n if self.val_dataset:\n self.val_dataset = self.val_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n padding=True\n ))\n def generate_prompt(self,sample,eos_token):\n\n prompt = self.config[\"dataset\"][\"prefix1\"]+sample[self.config[\"dataset\"][\"datatset_col1\"]]+\\\n self.config[\"dataset\"][\"prefix2\"] + sample[self.config[\"dataset\"][\"datatset_col2\"]]+eos_token\n # print(\"prompt:\",prompt)\n return prompt\n\n def load_model(self):\n\n if self.config[\"model\"][\"fine_tuning_type\"] == \"QLoRA\":\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n elif self.config[\"model\"][\"fine_tuning_type\"] == \"LoRA\":\n bnb_config = None\n try:\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n else:\n self.tokenizer = 
AutoTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n except Exception as e:\n return -1,e\n if not self.tokenizer.pad_token:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n if self.config[\"training\"][\"gradient_checkpointing\"] and not self.config[\"model\"][\"base_model_name\"].rfind(\"phi\")>=0:\n # self.base_model.gradient_checkpointing_enable()\n self.base_model = prepare_model_for_kbit_training(self.base_model,use_gradient_checkpointing=True,gradient_checkpointing_kwargs={'use_reentrant':False})\n else:\n self.base_model = prepare_model_for_kbit_training(self.base_model, use_gradient_checkpointing=False,gradient_checkpointing_kwargs={'use_reentrant':False})\n if self.config[\"model\"][\"base_model_name\"].lower().rfind(\"llama\")>=0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"mistral\") >= 0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"zephyr\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"llama\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"falcon\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"falcon\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"gpt2\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"gpt2\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"phi\") >= 0:\n target_modules = [\"Wqkv\", \"out_proj\"]\n task_type = \"CAUSAL_LM\"\n else:\n raise ValueError(f'{self.config[\"model\"][\"base_model_name\"]} is not yet supported.')\n #T5,bart, task_type = \"SEQ_2_SEQ_LM\" ,AutoModelForSeq2SeqLM\n \n lora_config = LoraConfig(\n r=self.config[\"model\"][\"lora_r\"],\n lora_alpha=self.config[\"model\"][\"lora_alpha\"],\n target_modules=target_modules,\n lora_dropout=self.config[\"model\"][\"lora_dropout\"],\n bias=self.config[\"model\"][\"lora_bias\"],\n task_type=task_type,\n )\n self.fused_model = get_peft_model(self.base_model, lora_config)\n # self.fused_model.gradient_checkpointing = True\n return 0,\"\"\n def train(self):\n self.run_name = datetime.now().strftime(\"run_%Y-%m-%d_%H-%M-%S\")\n logging_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"tensorboard\")\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"output_model\", run_output_model_name + \"_adapter\")\n checkpoint_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name)\n self.trainer = transformers.Trainer(\n model=self.fused_model,\n train_dataset=self.train_dataset,\n eval_dataset= self.val_dataset if self.val_dataset else None,\n args=transformers.TrainingArguments(\n per_device_train_batch_size=self.config[\"training\"][\"batch_size\"],\n gradient_accumulation_steps=self.config[\"training\"][\"gradient_accumulation_steps\"],\n warmup_steps=self.config[\"training\"][\"warmup_steps\"],\n num_train_epochs=self.config[\"training\"][\"epochs\"],\n learning_rate=self.config[\"training\"][\"learning_rate\"],\n fp16=True,\n output_dir=checkpoint_dir,\n report_to=\"tensorboard\",\n optim=self.config[\"training\"][\"optimizer\"],\n 
lr_scheduler_type=self.config[\"training\"][\"lr_scheduler_type\"],\n load_best_model_at_end=True if self.val_dataset else False,\n save_strategy=\"steps\",\n save_steps = self.config[\"training\"][\"eval_steps\"],\n save_total_limit=1,\n evaluation_strategy=\"steps\" if self.val_dataset else \"no\",\n eval_steps=self.config[\"training\"][\"eval_steps\"], # eval interval\n per_device_eval_batch_size=1,\n # eval_steps=10, # eval interval\n logging_steps=100,#self.config[\"training\"][\"eval_steps\"]\n # run_name=self.run_name,\n logging_dir=logging_dir,\n ),\n\n callbacks=[self.logging_callback,transformers.EarlyStoppingCallback(early_stopping_patience=self.config[\"training\"][\"early_stopping_patience\"]) ] if self.config[\"training\"][\"early_stopping_patience\"]>0 else [self.logging_callback],\n data_collator=transformers.DataCollatorForLanguageModeling(self.tokenizer, mlm=False),\n\n )\n\n self.fused_model.config.use_cache = False # silence the warnings. Please re-enable for inference!\n try:\n self.trainer.train()\n except Exception as e:\n return -1,e\n # model_save_path = f\"{self.config['training']['output_dir']}/{self.config['model']['base_model_name']}_adapter\"\n self.trainer.save_model(output_model_dir)\n return 0,\"\"\n def merge_and_save(self):\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n else:\n base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_adapter_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\n run_output_model_name + \"_adapter\")\n\n model = PeftModel.from_pretrained(base_model, output_adapter_model_dir)\n\n merged_model = model.merge_and_unload()\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_merged_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\"merged_\"+run_output_model_name,\"ori\")\n merged_model.save_pretrained(output_merged_model_dir)\n self.tokenizer.save_pretrained(output_merged_model_dir)\n\n def _print_trainable_parameters(self, model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\n class LoggingCallbacks(transformers.TrainerCallback):\n # current_step = 0\n # max_steps = 0\n\n def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n pass\n\n def on_step_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n global TRAINING_STATUS\n if TRAINING_STATUS.status == 1:\n control.should_epoch_stop = True\n control.should_training_stop = True\n else:\n self.max_steps = state.max_steps\n self.current_step = state.global_step\n\n def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: 
transformers.TrainerControl, logs, **kwargs):\n pass\n\n def free_memroy(self):\n try:\n del self.fused_model\n del self.tokenizer\n del self.base_model\n del self.trainer\n torch.cuda.empty_cache()\n except Exception as e:\n print(\"Free memory error:\",e)" }, { "identifier": "TRAINING_STATUS", "path": "src/finetune/qlora_trainer.py", "snippet": "TRAINING_STATUS = TrainingStatus()" }, { "identifier": "download_model_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_model_wrapper(repo_id,local_model_root_dir, specific_file=None, return_links=False, check=False,progress = gr.Progress()):\n if repo_id.endswith(\".gguf\"):\n try:\n model_dir = os.path.join(local_model_root_dir, '/'.join(repo_id.split('/')[0:-1]))\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {repo_id.split('/')[-1]} to `{model_dir}/...`</span>\"\n hf_hub_download(repo_id='/'.join(repo_id.split('/')[0:-1]), filename=repo_id.split('/')[-1], local_dir=model_dir, resume_download=True,\n force_download=False)\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n else:\n if repo_id == \"\" or repo_id == \"None\":\n # return gr.update(value=\"Model's name is empty!\",visible=True)\n yield f\"Model's name is empty!\"\n else:\n model_dir = os.path.join(local_model_root_dir, repo_id)\n\n model_config_path = os.path.join(model_dir, \"config.json\")\n model_config_path1 = os.path.join(model_dir, \"pytorch_model.bin\")\n model_config_path2 = os.path.join(model_dir, \"model.safetensors\")\n if os.path.exists(model_config_path1) or os.path.exists(model_config_path2):\n yield '<span style=\"color:green\">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded.</span>'\n else:\n\n try:\n progress(0.0)\n # download_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"download-model.py\")\n # downloader = importlib.import_module(download_model_path).ModelDownloader()\n downloader = download_model.ModelDownloader()\n model, branch = downloader.sanitize_model_and_branch_names(repo_id, None)\n yield (\"Getting the download links from Hugging Face\")\n links, sha256, is_lora, is_llamacpp, link_file_size_list = downloader.get_download_links_from_huggingface(model,\n branch,\n text_only=False,\n specific_file=specific_file\n )\n if return_links:\n yield '\\n\\n'.join([f\"`{Path(link).name}`\" for link in links])\n yield (\"Getting the output folder\")\n # base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir\n base_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\n output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp,\n base_folder=base_folder)\n link_file_size_list = np.array(link_file_size_list)\n links = np.array(links)\n sorted_index = np.argsort(link_file_size_list)\n link_file_size_list = link_file_size_list[sorted_index]\n links = links[sorted_index]\n total_file_size = sum(link_file_size_list)\n copyed_file_size = 0\n for link, link_file_size in zip(links, link_file_size_list):\n model_file_name = link.split('/')[-1]\n if model_file_name.find(\"Pooling\")>=0:\n model_file_name = model_file_name+\"/config.json\"\n # yield (f\"Downloading file {model_file_name} to `{output_folder}/...`\")\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {model_file_name} to `{output_folder}/...`</span>\"\n hf_hub_download(repo_id=repo_id, 
filename=model_file_name, local_dir=model_dir, resume_download=True,\n force_download=False)\n copyed_file_size += link_file_size\n progress(copyed_file_size / total_file_size)\n # yield (\"Download successful!\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" }, { "identifier": "download_dataset_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_dataset_wrapper(repo_id,local_dataset_root_dir,progress = gr.Progress()):\n repo_id = repo_id.strip()\n if repo_id == \"\":\n yield \"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset's name is empty!</span>\"\n else:\n dataset_dir = os.path.join(local_dataset_root_dir, repo_id)\n # dataset_config_path1 = os.path.join(dataset_dir, \"config.json\")\n dataset_config_path1 = os.path.join(dataset_dir, \"dataset_infos.json\")\n dataset_config_path2 = os.path.join(dataset_dir, \"dataset_dict.json\")\n\n if os.path.exists(dataset_config_path1) or os.path.exists(dataset_config_path2):\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset has already been downloaded.</span>\"\n else:\n try:\n\n progress(0.3)\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading dataset to `{dataset_dir}/...`</span>\"\n datasets = load_dataset(repo_id)\n progress(0.8)\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n datasets.save_to_disk(dataset_dir)\n # datasets = load_from_disk(\"dddd\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" } ]
import pandas as pd import math import numpy as np import gc import os,requests import subprocess,threading import time import gradio as gr import os import traceback import numpy as np import glob import shutil import torch import socket from src.utils.common import login_huggingface from src.finetune.huggingface_inference import HuggingfaceInference from src.finetune.llama_cpp_inference import LlamaCppInference from src.rag.qa_with_rag import QAWithRAG from src.utils.common import read_yaml,get_first_row_from_dataset,\ get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template from transformers.training_args import OptimizerNames from huggingface_hub import hf_hub_download from src.utils import download_model from pathlib import Path from src.finetune.qlora_trainer import QloraTrainer from src.finetune.qlora_trainer import TRAINING_STATUS from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper
13,880
try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def download_hub_home_chat_model_postprocess(): return gr.update(visible=True), gr.update(visible=False) def click_download_hub_home_chat_model_btn(): return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def change_home_chat_model_source_radio(home_chat_model_source_radio, hub_home_chat_model_names_dropdown): local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") if home_chat_model_source_radio == "Download From Huggingface Hub": if not hub_home_chat_model_names_dropdown: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>' else: if validate_model_path(hub_home_chat_model_names_dropdown)[0]: model_download_status = '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>' else: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>' return gr.update(visible=True), gr.update(visible=False), gr.update( visible=False), gr.update(visible=True, value=model_download_status), gr.update( visible=True), gr.update( visible=False) else: model_download_status = "" return gr.update(visible=False), gr.update(visible=True), gr.update( visible=True), gr.update(visible=False, value=model_download_status), gr.update( visible=False), gr.update( visible=False) click_download_hub_home_chat_model_names_btn_event = download_hub_home_chat_model_names_btn.click( check_local_model_or_dataset_is_empty1, [hub_home_chat_model_names_dropdown,Huggingface_hub_token]).success( click_download_hub_home_chat_model_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown]).then( download_model_wrapper, [hub_home_chat_model_names_dropdown, local_home_chat_model_root_dir_textbox], download_hub_home_chat_model_status_markdown). 
\ then(download_hub_home_chat_model_postprocess, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn]) stop_download_hub_home_chat_model_names_btn.click(click_stop_download_hub_home_chat_model_names_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown], cancels=[ click_download_hub_home_chat_model_names_btn_event]) home_chat_model_source_radio.change(change_home_chat_model_source_radio, [home_chat_model_source_radio, hub_home_chat_model_names_dropdown], [hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, refresh_local_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def change_refresh_local_home_chat_model_names_btn(): local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) return gr.update(choices=local_home_chat_model_names,value = local_home_chat_model_names[0] if local_home_chat_model_names else None) refresh_local_home_chat_model_names_btn.click(change_refresh_local_home_chat_model_names_btn,[],[local_home_chat_model_names_dropdown]) def change_hub_home_chat_model_names_dropdown(hub_home_chat_model_names_dropdown): if not hub_home_chat_model_names_dropdown: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>'), \ gr.update(visible=True), gr.update(visible=False) if validate_model_path(hub_home_chat_model_names_dropdown)[0]: return gr.update( visible=True, value='<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>'), \ gr.update(visible=True), gr.update(visible=False) else: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>'), \ gr.update(visible=True), gr.update(visible=False) hub_home_chat_model_names_dropdown.change(change_hub_home_chat_model_names_dropdown, hub_home_chat_model_names_dropdown, [download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def click_load_home_chat_model_btn(home_chat_model_source_radio, hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, max_new_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repeat_penalty_slider, chat_history_window_slider,using_4bit_quantization_checkbox,low_cpu_mem_usage_checkbox, progress=gr.Progress()): if home_chat_model_source_radio == "Download From Huggingface Hub": cur_model_name = hub_home_chat_model_names_dropdown else: cur_model_name = local_home_chat_model_names_dropdown if not validate_model_path(cur_model_name)[0]: raise gr.Error(f"Model does not exist!") global infer_model global stop_generation_status stop_generation_status = True progress(0.6) if infer_model: infer_model.free_memory() infer_model = None torch.cuda.empty_cache() yield "Loading model ..." load_model_status = 0 model_path = validate_model_path(cur_model_name)[1] if model_path.split('.')[-1] == "gguf":
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} 
--reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From 
Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>') else: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') # home_chat_model_running_status_markdown = gr.Markdown( # '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=3 ) generate_btn = gr.Button("Generate", scale=1) stop_btn = gr.Button("Stop", scale=1) # clear_btn = gr.Button("Clear",scale=1) with gr.Tab("Fine-Tuning"): with gr.Tabs() as tensorboard_tab: with gr.TabItem("Training", id=0): with gr.Row(): with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;1.Training", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;1).Model", elem_classes="white_background") with gr.Group(): # gr.Markdown("<br> &nbsp;&nbsp;&nbsp; Base Model") base_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_model_root_dir})"] base_model_source_radio = gr.Radio(base_model_source_radio_choices, label="Base Model", value=base_model_source_radio_choices[0], interactive=True) with gr.Row(elem_classes="white_background"): base_model_name_dropdown = gr.Dropdown(training_base_model_names, label="Model Name", value=training_base_model_names[0] if training_base_model_names else None, interactive=True, visible=True, scale=5, allow_custom_value=True) download_local_model_btn = gr.Button("Download", scale=1, visible=True) stop_download_local_model_btn = gr.Button("Stop", scale=1, visible=False) # model_download_status = gr.Markdown("<div id='vertical_center_align_markdown'><p style='text-align: center;'>Not downloaded</p></div>", 
elem_classes="white_background",scale=1,full_width=True,visible=False) if validate_model_path(training_base_model_names[0])[0]: download_model_status_markdown = gr.Markdown('<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_model_status_markdown = gr.Markdown('<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): # local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") # runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_model_list = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) local_model_list = get_hg_model_names_from_dir(os.path.dirname(os.path.abspath(__file__)), "models") local_model_dropdown = gr.Dropdown(local_model_list, label="Local Model", info="", value=local_model_list[0] if len(local_model_list) > 0 else None, interactive=True, elem_classes="white_background", scale=5, visible=False) refresh_local_model_list_btn = gr.Button("Refresh", scale=1, visible=False) fine_tuning_type_dropdown = gr.Dropdown(["QLoRA", "LoRA"], label="Fine-Tuning Type", info="", value="QLoRA", interactive=True) with gr.Group(): with gr.Row(elem_classes="white_background"): # gr.Markdown("### &nbsp;&nbsp;&nbsp; LoRA Config", elem_classes="white_background") lora_r_list = [str(ri) for ri in range(8, 65, 8)] lora_r_slider = gr.Slider(8, 64, value=8, step=8, label="lora_r", interactive=True) # lora_r_dropdown = gr.Dropdown(lora_r_list,label="lora_r", value=lora_r_list[0],interactive=True,allow_custom_value=True) lora_alpha_slider = gr.Slider(8, 96, value=32, step=8, label="lora_alpha", interactive=True) # lora_alpha_list = [str(ri) for ri in range(8, 97, 8)] # lora_alpha_dropdown = gr.Dropdown(lora_alpha_list,label="lora_alpha", value=lora_alpha_list[3],interactive=True,allow_custom_value=True) with gr.Row(elem_classes="white_background"): lora_dropout_slider = gr.Slider(0, 1, value=0.05, step=0.01, label="lora_dropout", interactive=True) lora_bias_dropdown = gr.Dropdown(["none", "all", "lora_only"], label="lora_bias", info="", value="none", interactive=True) with gr.Group(): gr.Markdown("### &nbsp;2).Dataset",elem_classes="white_background") dataset_source_radio_choices = ["Download From Huggingface Hub", f"From Local HG Dataset In {local_dataset_root_dir})"] dataset_source_radio = gr.Radio(dataset_source_radio_choices, label="Dataset Source", value=dataset_source_radio_choices[1], interactive=True) with gr.Row(equal_height=True): hg_dataset_path_textbox = gr.Textbox(label="Dataset Name:",elem_classes="none_border",visible=False, interactive=True, scale=4, value="iamtarun/python_code_instructions_18k_alpaca") download_local_dataset_btn = gr.Button("Download", scale=1, visible=False) stop_download_local_dataset_btn = gr.Button("Stop", scale=1, visible=False) download_dataset_status_markdown = gr.Markdown('') with gr.Row(): hg_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1,value="train") hg_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1) with gr.Row(): local_dataset_list.pop( local_dataset_list.index(INIT_DATASET_NAME)) local_dataset_list.insert(0, INIT_DATASET_NAME) local_train_path_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Train Dataset", info="", 
value=local_dataset_list[0] if len(local_dataset_list)>0 else None, interactive=True, elem_classes="white_background", scale=5, visible=True) refresh_local_train_path_dataset_list_btn = gr.Button("Refresh", scale=1, visible=True) with gr.Row(): local_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=True, elem_classes="white_background", scale=1,value="train",visible=True) local_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=True, elem_classes="white_background", scale=1,visible=True) with gr.Group(elem_classes="white_background"): # gr.Markdown("<h4><br> &nbsp;&nbsp;Prompt Template: (Prefix1 + ColumnName1 + Prefix2 + ColumnName2)</h4>",elem_classes="white_background") gr.Markdown("<br> &nbsp;&nbsp;&nbsp;&nbsp;**Prompt Template: (Prefix1+ColumnName1+Prefix2+ColumnName2+Prefix3+ColumnName3+Prefix4+ColumnName4)**",elem_classes="white_background") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>",elem_classes="white_background") # using_llama2_chat_template_checkbox = gr.Checkbox(True, label="Using Llama2/Mistral chat template",interactive=True,visible=False) with gr.Row(elem_classes="white_background"): # prompt_template prefix1_textbox = gr.Textbox(label="Prefix1:",value=INIT_PREFIX1,lines=2,interactive=True,elem_classes="white_background") datatset_col1_dropdown = gr.Dropdown(col_names, label="ColumnName1:", info="",value=col_names[1],interactive=True,elem_classes="white_background") prefix2_textbox = gr.Textbox(label="Prefix2:",value=INIT_PREFIX2,lines=2,interactive=True,elem_classes="white_background") datatset_col2_dropdown = gr.Dropdown(col_names, label="ColumnName2:", info="",value=col_names[2],interactive=True,elem_classes="white_background") with gr.Row(elem_classes="white_background"): prefix3_textbox = gr.Textbox(label="Prefix3:",value=INIT_PREFIX3,lines=2,interactive=True,elem_classes="white_background") datatset_col3_dropdown = gr.Dropdown(col_names, label="ColumnName3:", info="",value=col_names[3],interactive=True,elem_classes="white_background") prefix4_textbox = gr.Textbox(label="Prefix4:",value=INIT_PREFIX4,lines=2,interactive=True,elem_classes="white_background") datatset_col4_dropdown = gr.Dropdown(col_names, label="ColumnName4:", info="",value=col_names[0],interactive=True,elem_classes="white_background") # print("") prompt_sample = INIT_PREFIX1 + INIT_COL1_TEXT + INIT_PREFIX2 + INIT_COL2_TEXT + INIT_PREFIX3 + INIT_COL3_TEXT + INIT_PREFIX4 + INIT_COL4_TEXT prompt_sample_textbox = gr.Textbox(label="Prompt Sample:",interactive=False,value=prompt_sample,lines=4) max_length_dropdown = gr.Dropdown(["Model Max Length"]+model_context_window, label="Max Length",value="Model Max Length", interactive=True,allow_custom_value=True) with gr.Group(): gr.Markdown("### &nbsp;3).Training Arguments",elem_classes="white_background") with gr.Row(elem_classes="white_background"): epochs_slider = gr.Slider(1, 100, value=10, step=1, label="Epochs", interactive=True) # epochs_dropdown = gr.Dropdown([1]+[bi for bi in range(10,101,10)], label="Epochs",value=1, interactive=True,allow_custom_value=True) batch_size_list = [1,2,3]+[bi for bi in range(4,32+1,4)] batch_size_slider = gr.Slider(1, 100, value=1, step=1, label="Batch Size", interactive=True) # batch_size_dropdown = gr.Dropdown(batch_size_list,label="Batch Size", info="",value=batch_size_list[0],interactive=True,allow_custom_value=True) # learning_rate_textbox = 
gr.Textbox(label="Learning Rate", value=2e-4,interactive=True) with gr.Row(elem_classes="white_background"): learning_rate_slider = gr.Slider(0, 0.01, value=2e-4, step=0.0001, label="Learning Rate", interactive=True) warmup_steps_slider = gr.Slider(0, 400, value=100, step=10, label="Warmup Steps", interactive=True) with gr.Row(elem_classes="white_background"): optimizer_dropdown = gr.Dropdown(transformer_optimizer_list, label="Optimizer", info="", value=transformer_optimizer_list[1], interactive=True) lr_scheduler_list = ["linear","cosine","cosine_with_hard_restarts","polynomial_decay","constant","constant_with_warmup","inverse_sqrt","reduce_on_plateau"] lr_scheduler_type_dropdown = gr.Dropdown(lr_scheduler_list, label="LR Scheduler Type", info="", value=lr_scheduler_list[0], interactive=True) with gr.Row(elem_classes="white_background"): early_stopping_patience_slider = gr.Slider(0, 50+1, value=0, step=5, label="Early Stopping Patience", interactive=True) gradient_accumulation_steps_slider = gr.Slider(1, 50, value=1, step=1, label="Gradient Accumulation Steps") with gr.Row(elem_classes="white_background"): eval_steps_slider = gr.Slider(0, 1000, value=100, step=100, label="eval_steps", interactive=True) gradient_checkpointing_checkbox = gr.Checkbox(True,label="Gradient Checkpointing",interactive=True) train_btn = gr.Button("Start Training") with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;2.Test",elem_classes="white_background") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file:os.path.getmtime(os.path.join(training_runs_dir,file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir,run_name) run_output_model = os.path.join(run_name_dir,"output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_")>=0: runs_output_model.append(os.path.join(run_name,"output_model",run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[0] if runs_output_model else None, interactive=True) gr.Markdown("") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>", elem_classes="white_background") with gr.Row(): test_input_textbox = gr.Textbox(label="Input:", interactive=True, value="", lines=4, scale=4) generate_text_btn = gr.Button("Generate",scale=1) finetune_test_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) # test_prompt = gr.Textbox(label="Prompt:", interactive=False, lines=2, scale=1) test_output = gr.Textbox(label="Output:", interactive=False,lines=4, scale=1) # def change_test_input_textbox(test_prefix1_textbox,test_input_textbox,test_prefix2_textbox): # return gr.update(value=test_prefix1_textbox+test_input_textbox+test_prefix2_textbox) # test_input_textbox.change(change_test_input_textbox,[test_prefix1_textbox,test_input_textbox,test_prefix2_textbox],test_prompt) with gr.Group(): gr.Markdown("## &nbsp;3.Quantization",elem_classes="white_background") with gr.Row(): quantization_type_list = ["gguf"] quantization_type_dropdown = 
gr.Dropdown(quantization_type_list, label="Quantization Type",value=quantization_type_list[0], interactive=True,scale=3) local_quantization_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Dataset for quantization", value=local_dataset_list[0] if len( local_dataset_list) > 0 else None, interactive=True, elem_classes="white_background", scale=7, visible=False) refresh_local_quantization_dataset_btn = gr.Button("Refresh", scale=2, visible=False) def click_refresh_local_quantization_dataset_btn(): local_dataset_list, _ = get_local_dataset_list() return gr.update(choices=local_dataset_list, value=local_dataset_list[0] if len(local_dataset_list) > 0 else "") refresh_local_quantization_dataset_btn.click(click_refresh_local_quantization_dataset_btn,[],local_quantization_dataset_dropdown) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_") >= 0: runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] quantization_runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[ 0] if runs_output_model else None, interactive=True, scale=6) quantize_btn = gr.Button("Quantize", scale=1,visible=False) if runs_output_model: model_name = runs_output_model[0].split(os.sep)[-2].split('_')[-1] quantized_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.sep.join(runs_output_model[0].split(os.sep)[0:-1]), "quantized_" + quantization_type_list[0] + "_" + model_name) if not os.path.exists(quantized_model_dir): os.makedirs(quantized_model_dir) quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''&nbsp;&nbsp;&nbsp;&nbsp;1.Follow the instructions in the llama.cpp to generate a GGUF:[https://github.com/ggerganov/llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp#prepare-data--run),<span style="color:red">&nbsp;&nbsp;Q4_K_M is recommend</span>''',visible=True) if runs_output_model: gguf_quantization_markdown2 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;2.Convert {runs_output_model[0]} to gguf model",visible=True) else: gguf_quantization_markdown2 = gr.Markdown( f"", visible=True) gguf_quantization_markdown3 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;3.Deploy gguf model", visible=False) else: quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''''',visible=True) gguf_quantization_markdown2 = gr.Markdown(f"",visible=True) gguf_quantization_markdown3 = gr.Markdown(f"", visible=True) with gr.Group(visible=False): gr.Markdown("## &nbsp;4.Deploy",elem_classes="white_background") with gr.Row(): deployment_framework_dropdown = 
gr.Dropdown(["TGI","llama-cpp-python"], label="Deployment Framework",value="TGI", interactive=True) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) # ori_model_runs_output_model = [] tgi_model_format_runs_output_model = [] gguf_model_format_runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: model_bin_path = os.path.exists( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "ori", "pytorch_model.bin")) if run_output_model_name.find("merged_") >= 0 and model_bin_path: tgi_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) gptq_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs',run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1], "pytorch_model.bin") if os.path.exists(gptq_model_path): tgi_model_format_runs_output_model.append(os.path.join(run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1])) gguf_model_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1]) if os.path.exists(gguf_model_dir): gguf_model_names = os.listdir(gguf_model_dir) for gguf_model_name in gguf_model_names: if gguf_model_name.split('.')[-1] == "gguf": gguf_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1], gguf_model_name)) tgi_model_format_runs_output_model = tgi_model_format_runs_output_model[::-1] gguf_model_format_runs_output_model = gguf_model_format_runs_output_model[::-1] deployment_runs_output_model_dropdown = gr.Dropdown(tgi_model_format_runs_output_model, label="runs_output_model", value=tgi_model_format_runs_output_model[ 0] if tgi_model_format_runs_output_model else None, interactive=True,scale=6) refresh_deployment_runs_output_model_btn = gr.Button("Refresh", scale=1, visible=True) if tgi_model_format_runs_output_model: model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.path.dirname(tgi_model_format_runs_output_model[0])) model_name = os.path.basename(tgi_model_format_runs_output_model[0]) if model_name.rfind("quantized_gptq_") >= 0: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name} --quantize gptq''' else: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name}''' run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value=run_server_value) run_client_value = '''Command-Line Interface(CLI):\ncurl 127.0.0.1:8080/generate -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'\n\nPython:\nfrom huggingface_hub import InferenceClient 
\nclient = InferenceClient(model="http://127.0.0.1:8080")\noutput = client.text_generation(prompt="What is Deep Learning?",max_new_tokens=512) ''' run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6,scale=1,value=run_client_value) else: run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value="") run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6, scale=1, value="") # deploy_llm_code = gr.Code(code_str, language="shell", lines=5, label="Install Requirements:") install_requirements_value = ''' ### &nbsp;&nbsp; 1.install docker ### &nbsp;&nbsp; 2.Install NVIDIA Container Toolkit <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.1 Configure the repository: </h4> <p> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ && \ sudo apt-get update </p> <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.2 Install the NVIDIA Container Toolkit packages: </h4> <p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sudo apt-get install -y nvidia-container-toolkit </p> ''' with gr.Accordion("Install Requirements",open=False) as install_requirements_accordion: install_requirements_markdown = gr.Markdown(install_requirements_value) run_llama_cpp_python_code = gr.Code("", language="python", lines=10, label="run_model_using_llama_cpp_python.py",visible=False) # run_script_textbox = gr.Textbox(label="Install Requirements:", interactive=False, scale=1,value=install_requirements_value) #dependencies with gr.TabItem("Tensorboard", id=1) as fdddd: # training_log_markdown = gr.Markdown('',every=mytestfun) with gr.Row(): # training_log_textbox = gr.Textbox(label="logging:",value="", interactive=True, lines=2, scale=1) with gr.Group(): training_log_markdown = gr.Markdown('') stop_training_btn = gr.Button("Stop Training") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir,run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) # print("dddddddd:",run_names) with gr.Group(): # with gr.Row(): training_runs_dropdown = gr.Dropdown(run_names, label="Training Runs",value=run_names[0] if run_names else None, interactive=True, scale=1) delete_text_btn = gr.Button("Delete Run", scale=1) iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:1024px;width:100%">' tensorboard_html = gr.HTML(iframe) with gr.Tab("RAG"): with gr.Row(): with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf","*.txt","*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file),reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: 
matched_file_name_list.append(os.path.basename(matched_file)) # chat_data_source_radio_choices = ["Chat With Document", # f"Chat With Image"] gr.Markdown("### &nbsp;Chat With Document", elem_classes="white_background") # chat_data_source_radio = gr.Radio(chat_data_source_radio_choices, # label="", # value=chat_data_source_radio_choices[0], # interactive=True) with gr.Row(): rag_data_list_dropdown = gr.Dropdown(matched_file_name_list, label=f"Local Documents In {rag_data_dir}", value=matched_file_name_list[0] if matched_file_name_list else None, interactive=True,scale=4, min_width=1) refresh_rag_data_list_btn = gr.Button("Refresh", scale=1, min_width=1) # if not current_running_model_name: # model_running_status_markdown = gr.Markdown(f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;No modelis running!</span>") # else: # model_running_status_markdown = gr.Markdown(f"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Model is runing:{current_running_model_name}.</span>") def click_refresh_rag_data_list_btn(): rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf", "*.txt", "*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file), reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: matched_file_name_list.append(os.path.basename(matched_file)) return gr.update(choices=matched_file_name_list,value=matched_file_name_list[0] if matched_file_name_list else None) refresh_rag_data_list_btn.click(click_refresh_rag_data_list_btn,[],rag_data_list_dropdown) # def update_model_running_status(): # return gr.update(value=f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{current_running_model_name} is runing!.</span>") # # load_model_btn.click(click_load_model_btn,model_list_dropdown,[model_list_dropdown]).success(update_model_running_status,[],model_running_status_markdown) with gr.Row(): rag_chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): rag_input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=6) rag_generate_btn = gr.Button("Generate", scale=1) rag_stop_btn = gr.Button("Stop", scale=1) # rag_clear_btn = gr.Button("Clear", scale=1) rag_model_running_status_markdown = gr.Markdown( f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) # retrieved_document_chunks_markdown = gr.Markdown( # f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) retrieved_document_chunks_dataframe = gr.Dataframe( headers=["ID", "Chunk"], datatype=["str", "str"], show_label=False, value=None ) with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, 
label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") # local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, 
visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=0.1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low Cpu Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty2(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty3(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty4(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty5(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def download_hub_home_chat_model_postprocess(): return gr.update(visible=True), gr.update(visible=False) def click_download_hub_home_chat_model_btn(): return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True) def click_stop_download_hub_home_chat_model_names_btn(): 
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def change_home_chat_model_source_radio(home_chat_model_source_radio, hub_home_chat_model_names_dropdown): local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") if home_chat_model_source_radio == "Download From Huggingface Hub": if not hub_home_chat_model_names_dropdown: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>' else: if validate_model_path(hub_home_chat_model_names_dropdown)[0]: model_download_status = '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>' else: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>' return gr.update(visible=True), gr.update(visible=False), gr.update( visible=False), gr.update(visible=True, value=model_download_status), gr.update( visible=True), gr.update( visible=False) else: model_download_status = "" return gr.update(visible=False), gr.update(visible=True), gr.update( visible=True), gr.update(visible=False, value=model_download_status), gr.update( visible=False), gr.update( visible=False) click_download_hub_home_chat_model_names_btn_event = download_hub_home_chat_model_names_btn.click( check_local_model_or_dataset_is_empty1, [hub_home_chat_model_names_dropdown,Huggingface_hub_token]).success( click_download_hub_home_chat_model_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown]).then( download_model_wrapper, [hub_home_chat_model_names_dropdown, local_home_chat_model_root_dir_textbox], download_hub_home_chat_model_status_markdown). 
\ then(download_hub_home_chat_model_postprocess, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn]) stop_download_hub_home_chat_model_names_btn.click(click_stop_download_hub_home_chat_model_names_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown], cancels=[ click_download_hub_home_chat_model_names_btn_event]) home_chat_model_source_radio.change(change_home_chat_model_source_radio, [home_chat_model_source_radio, hub_home_chat_model_names_dropdown], [hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, refresh_local_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def change_refresh_local_home_chat_model_names_btn(): local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) return gr.update(choices=local_home_chat_model_names,value = local_home_chat_model_names[0] if local_home_chat_model_names else None) refresh_local_home_chat_model_names_btn.click(change_refresh_local_home_chat_model_names_btn,[],[local_home_chat_model_names_dropdown]) def change_hub_home_chat_model_names_dropdown(hub_home_chat_model_names_dropdown): if not hub_home_chat_model_names_dropdown: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>'), \ gr.update(visible=True), gr.update(visible=False) if validate_model_path(hub_home_chat_model_names_dropdown)[0]: return gr.update( visible=True, value='<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>'), \ gr.update(visible=True), gr.update(visible=False) else: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>'), \ gr.update(visible=True), gr.update(visible=False) hub_home_chat_model_names_dropdown.change(change_hub_home_chat_model_names_dropdown, hub_home_chat_model_names_dropdown, [download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def click_load_home_chat_model_btn(home_chat_model_source_radio, hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, max_new_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repeat_penalty_slider, chat_history_window_slider,using_4bit_quantization_checkbox,low_cpu_mem_usage_checkbox, progress=gr.Progress()): if home_chat_model_source_radio == "Download From Huggingface Hub": cur_model_name = hub_home_chat_model_names_dropdown else: cur_model_name = local_home_chat_model_names_dropdown if not validate_model_path(cur_model_name)[0]: raise gr.Error(f"Model does not exist!") global infer_model global stop_generation_status stop_generation_status = True progress(0.6) if infer_model: infer_model.free_memory() infer_model = None torch.cuda.empty_cache() yield "Loading model ..." load_model_status = 0 model_path = validate_model_path(cur_model_name)[1] if model_path.split('.')[-1] == "gguf":
infer_model = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens_slider,
2
2023-11-25 12:37:21+00:00
16k
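As a brief aside on the row above: its cropped code ends on a file-extension check (model_path.split('.')[-1] == "gguf") and the gold next line constructs a LlamaCppInference for that branch. The sketch below only illustrates that dispatch pattern in a self-contained way; GGUFBackend, HFBackend and load_backend are hypothetical placeholder names invented here, not the repository's API.

# Minimal sketch of extension-based backend dispatch (placeholder classes, assumed names).
from pathlib import Path

class GGUFBackend:
    """Placeholder for a llama.cpp-style loader (stands in for the GGUF branch)."""
    def __init__(self, model_path: str, max_new_tokens: int = 256):
        self.model_path = model_path
        self.max_new_tokens = max_new_tokens

class HFBackend:
    """Placeholder for a Transformers-style loader used for non-GGUF checkpoints."""
    def __init__(self, model_path: str, max_new_tokens: int = 256):
        self.model_path = model_path
        self.max_new_tokens = max_new_tokens

def load_backend(model_path: str, max_new_tokens: int = 256):
    # Same idea as the snippet above: pick the inference backend from the file suffix.
    if Path(model_path).suffix == ".gguf":
        return GGUFBackend(model_path, max_new_tokens=max_new_tokens)
    return HFBackend(model_path, max_new_tokens=max_new_tokens)

print(type(load_backend("llama-2-7b.Q4_K_M.gguf")).__name__)  # GGUFBackend
print(type(load_backend("mistral-7b")).__name__)              # HFBackend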
danilonumeroso/conar
models/tsp_reasoner.py
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef beam_search_baseline(data, return_ratio=True):" }, { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n continue\n if name.endswith('_temporal') and 'index' not in name:\n tensor = tensor.transpose(1, 0)\n batch[name] = tensor\n return batch\n\n @staticmethod\n def get_masks(train, batch, continue_logits, enforced_mask):\n mask = continue_logits[batch.batch] > 0\n mask_cp = (continue_logits > 0.0).bool()\n mask_edges = mask[batch.edge_index[0]]\n if not train and enforced_mask is not None:\n enforced_mask_ids = enforced_mask[batch.batch]\n mask &= enforced_mask_ids\n mask_cp &= enforced_mask\n return mask_cp, mask, mask_edges\n\n def add_encoder(self, stage, name, loc, data_type, data_sample, bias):\n if name == 'adj': # we use edge indices\n return\n if data_type == Type.SCALAR or data_type == Type.MASK or data_type == Type.MASK_ONE:\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n self.encoders[stage][name] = nn.Linear(in_shape, self.latent_features, bias=bias)\n\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are 1-hot encoded on the edges\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.encoders[stage][name] = nn.ModuleList([\n nn.Linear(1, self.latent_features, bias=bias),\n nn.Linear(1, self.latent_features, bias=bias)\n ])\n\n def add_decoder(self, stage, name, loc, data_type, data_sample, bias):\n assert name != 'adj', 'Adjacency matrix should not be decoded'\n dec = None\n if loc == Location.NODE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.Linear(2*self.latent_features, 1, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.Linear(2*self.latent_features, in_shape, bias=bias)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are decoded from both node and edge information\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if loc == Location.GRAPH:\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n in_shape = data_sample.shape[-1] if data_type == Type.CATEGORICAL else 1\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n\n if loc == Location.EDGE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if 
data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n if data_type == Type.POINTER:\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n assert dec is not None, breakpoint()\n self.decoders[stage][name] = dec\n\n\n\n\n def __init__(self,\n spec,\n data,\n latent_features,\n algo_processor,\n bias=True,\n use_TF=False,\n use_sinkhorn=True,\n L1_loss=False,\n xavier_on_scalars=True,\n global_termination_pool='max', #'predinet',\n get_attention=False,\n use_batch_norm=False,\n transferring=False,\n timeit=True,\n **kwargs):\n\n super().__init__()\n self.step_idx = 0\n self.latent_features = latent_features\n self.assert_checks = False\n self.timeit = timeit\n self.debug = False\n self.debug_epoch_threshold = 1e9\n self.L1_loss = L1_loss\n self.global_termination_pool = global_termination_pool\n self.next_step_pool = True\n self.processor = algo_processor\n self.triplet_reasoning = False\n if isinstance(self.processor.processors[0].processor, TripletMPNN):\n self.triplet_reasoning = True\n self.triplet_reductor = nn.Linear(2*latent_features, latent_features, bias=bias)\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.get_attention = get_attention\n self.lambda_mul = 1 # 0.0001\n self.transferring = transferring\n self.node_encoder = nn.Sequential(\n nn.Linear(2*latent_features, latent_features, bias=bias),\n )\n self.encoders = nn.ModuleDict({\n 'input': nn.ModuleDict({\n }),\n 'hint': nn.ModuleDict({\n }),\n })\n self.decoders = nn.ModuleDict({\n 'hint': nn.ModuleDict({\n }),\n 'output': nn.ModuleDict({\n })\n })\n for name, (stage, loc, datatype) in spec.items():\n if name == 'adj': # we use edge indices\n continue\n if stage == 'input':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'output':\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'hint':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n\n self.node_pointer_vec = nn.Parameter(torch.randn(latent_features))\n if xavier_on_scalars:\n assert False, \"NEEDS REFACTORING\"\n torch.nn.init.trunc_normal_(self.encoders['input']['edge_attr'].weight, std=1/torch.sqrt(torch.tensor(latent_features)))\n\n if global_termination_pool == 'attention':\n inp_dim = latent_features\n self.global_attn = GlobalAttentionPlusCoef(\n nn.Sequential(\n nn.Linear(inp_dim, latent_features, bias=bias),\n nn.LeakyReLU(),\n nn.Linear(latent_features, 1, bias=bias)\n ),\n nn=None)\n\n if global_termination_pool == 'predinet':\n lf = latent_features\n self.predinet = PrediNet(lf, 1, lf, lf, flatten_pooling=torch_geometric.nn.glob.global_max_pool)\n\n self.termination_network = nn.Sequential(\n nn.BatchNorm1d(latent_features) if use_batch_norm else nn.Identity(),\n nn.Linear(latent_features, 1, bias=bias),\n )\n\n def get_continue_logits(self, batch_ids, latent_nodes, sth_else=None):\n if self.global_termination_pool == 'mean':\n graph_latent = 
torch_geometric.nn.global_mean_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'max':\n graph_latent = torch_geometric.nn.global_max_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'attention':\n graph_latent, coef = self.global_attn(latent_nodes, batch_ids)\n if self.get_attention:\n self.attentions[self.step_idx] = coef.clone().detach()\n self.per_step_latent[self.step_idx] = sth_else\n\n if self.global_termination_pool == 'predinet':\n assert not torch.isnan(latent_nodes).any()\n graph_latent = self.predinet(latent_nodes, batch_ids)\n\n if self.get_attention:\n self.attentions[self.step_idx] = latent_nodes\n continue_logits = self.termination_network(graph_latent).view(-1)\n return continue_logits\n\n def zero_termination(self):\n self.true_positive = 0\n self.false_positive = 0\n self.false_negative = 0\n self.true_negative = 0\n\n def zero_steps(self):\n self.sum_of_processed_nodes = 0\n self.sum_of_processed_edges = 0\n self.step_idx = 0\n self.sum_of_steps = 0\n self.cnt = 0\n\n @staticmethod\n def convert_logits_to_outputs(spec,\n logits,\n fr,\n to,\n num_nodes,\n batch_ids,\n include_probabilities=True,\n dbg=False):\n outs = defaultdict(dict)\n\n for stage in logits.keys():\n for name in logits[stage].keys():\n if name not in logits[stage] or name not in spec:\n continue\n stage, loc, data_type = spec[name]\n assert stage != Stage.INPUT\n if data_type == Type.SOFT_POINTER:\n assert False, f\"Not yet added, please add {name}\"\n if data_type in [Type.CATEGORICAL]:\n indices = logits[stage][name].argmax(-1)\n outshape = logits[stage][name].shape[-1]\n outs[stage][name] = F.one_hot(indices, num_classes=outshape).float()\n if data_type == Type.MASK_ONE:\n _, amax = torch_scatter.scatter_max(logits[stage][name], batch_ids, dim=0)\n amax = amax.squeeze(-1)\n outs[stage][name] = torch.zeros_like(logits[stage][name])\n outs[stage][name][amax] = 1\n if data_type == Type.MASK:\n outs[stage][name] = (logits[stage][name] > 0).float()\n if data_type == Type.SCALAR:\n outs[stage][name] = logits[stage][name]\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n _, pointers = torch_scatter.scatter_max(pointer_logits, fr, dim_size=num_nodes)\n pointers = to[pointers]\n pointer_probabilities = torch_geometric.utils.softmax(pointer_logits, fr, num_nodes=num_nodes)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n pointers = pointer_logits.argmax(-1)\n pointer_probabilities = F.softmax(pointer_logits, dim=-1)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n return outs\n\n def set_initial_states(self, batch, init_last_latent=None):\n self.processor.zero_lstm(batch.num_nodes) # NO-OP if processor(s) don't use LSTM\n self.last_latent = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n if init_last_latent is not None:\n self.last_latent = init_last_latent\n self.last_latent_edges = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n self.last_continue_logits = torch.ones(batch.num_graphs, device=batch.edge_index.device)\n self.last_logits = defaultdict(dict)\n\n\n for name, (stage, loc, data_type) in 
self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n if name not in self.decoders[stage]:\n continue\n if stage == Stage.OUTPUT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n if data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name).unsqueeze(-1)\n if data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name).int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.last_logits[stage][name] = torch.full((batch.edge_index.shape[1], int(ptrs.max().item())+1), -1e9).to(batch.edge_index.device)\n self.last_logits[stage][name][torch.arange(ptrs.shape[0]), ptrs] = 1e9\n else:\n assert False, breakpoint()\n\n if stage == Stage.HINT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0].unsqueeze(-1)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n else:\n assert False, breakpoint()\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0, :].unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name)[0, :].int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.max_nodes_in_graph = int(ptrs.max().item())+1 # FIXME try another way to infer\n self.last_logits[stage][name] = torch.where(edge_one_hot_encode_pointers_edge(ptrs, batch, self.max_nodes_in_graph).bool(), 1e9, -1e9).to(batch.edge_index.device)\n else:\n assert False, breakpoint()\n\n self.all_hint_logits = []\n self.all_masks_graph = []\n\n def update_per_mask(self, before, after, mask=None):\n # NOTE: this does expansion of the mask, if you do\n # NOT use expansion, use torch.where\n if mask is None:\n mask = self.mask\n mask = mask.unsqueeze(-1).expand_as(before)\n return torch.where(mask, after, before)\n\n def update_state_dict(self, before, after):\n new_before = defaultdict(dict)\n for stage in after.keys():\n for name in 
after[stage].keys():\n _, loc, data_type = self.dataset_spec[name]\n if loc == Location.GRAPH:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_cp)\n if loc == Location.EDGE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL, Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_edges)\n else:\n assert False, \"Please implement\"\n if loc == Location.NODE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name])\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = torch.where(self.mask_edges, after[stage][name], before[stage][name])\n else:\n assert False, breakpoint()\n return new_before\n\n def update_states(self, batch, current_latent, edges_current_latent,\n logits, continue_logits):\n self.last_continue_logits = torch.where(self.mask_cp, continue_logits,\n self.last_continue_logits)\n self.last_latent = self.update_per_mask(self.last_latent, current_latent)\n self.last_latent_edges = self.update_per_mask(self.last_latent_edges, edges_current_latent, mask=self.mask_edges)\n self.last_logits = self.update_state_dict(self.last_logits, logits)\n self.all_hint_logits.append(self.last_logits['hint'])\n self.all_masks_graph.append(self.mask_cp)\n preds = type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)\n self.last_hint = preds['hint']\n self.last_output = preds['output']\n\n def prepare_initial_masks(self, batch):\n self.mask = torch.ones_like(batch.batch, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_cp = torch.ones(batch.num_graphs, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_edges = torch.ones_like(batch.edge_index[0], dtype=torch.bool, device=batch.edge_index.device)\n\n def loop_condition(self, termination, STEPS_SIZE):\n return (((not self.training and termination.any()) or\n (self.training and termination.any())) and\n self.step_idx+1 < STEPS_SIZE)\n\n def loop_body(self,\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=1000):\n\n current_latent, edges_current_latent, preds, continue_logits =\\\n self.forward(\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n first_n_processors=first_n_processors,\n )\n termination = continue_logits\n\n self.debug_batch = batch\n self.debug_hint_out_curr = hint_out_curr\n if self.timeit:\n st = time.time()\n self.update_states(batch, current_latent, edges_current_latent, preds, termination)\n if self.timeit:\n print(f'updating states: {time.time()-st}')\n\n def get_step_input(self, x_curr, batch):\n if self.training and self.use_TF or self.hardcode_outputs:\n return x_curr\n return type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)['hint']\n\n def encode_inputs(self, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n for name, (stage, loc, data_type) 
in self.dataset_spec.items():\n if stage != Stage.INPUT:\n continue\n if name not in self.encoders[stage]:\n continue\n data = getattr(batch, name)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n assert False, breakpoint() # we don't have it for now (B-F/MST), will figure out later\n if data_type != Type.CATEGORICAL:\n data = data.unsqueeze(-1)\n if loc == Location.EDGE:\n edge_fts += self.encoders[stage][name](data)\n if loc == Location.NODE:\n node_fts += self.encoders[stage][name](data)\n return node_fts, edge_fts\n\n def encode_hints(self, hints, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n graph_fts = torch.zeros(batch.num_graphs, self.latent_features, device=batch.edge_index.device)\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n if name not in self.encoders[stage]:\n continue\n hint = hints[name]\n if loc == Location.NODE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n node_fts = node_fts + self.encoders['hint'][name](hint)\n if loc == Location.EDGE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n edge_fts = edge_fts + self.encoders['hint'][name](hint)\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(hint, batch.edge_index)\n edge_fts = edge_fts + self.encoders['hint'][name](pred_gt_one_hot.unsqueeze(-1))\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers_edge(hint, batch, self.max_nodes_in_graph)\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n encoding = self.encoders['hint'][name][0](pred_gt_one_hot.unsqueeze(-1))\n encoding_2 = self.encoders['hint'][name][1](pred_gt_one_hot.unsqueeze(-1))\n encoding_sparse = SparseTensor(row=batch.edge_index[0], col=batch.edge_index[1], value=encoding)\n res_1 = encoding_sparse.mean(1)[batch.edge_index[0], batch.edge_index[1]-starts_edge]\n res_2 = encoding_2.mean(1)\n edge_fts += res_1 + res_2 # INPLACE\n if loc == Location.GRAPH and data_type in [Type.CATEGORICAL, Type.SCALAR, Type.MASK]:\n graph_fts = graph_fts + self.encoders['hint'][name](hint)\n return node_fts, edge_fts, graph_fts\n\n def get_input_output_hints(self, batch):\n hint_inp_curr = {}\n hint_out_curr = {}\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n hint_inp_curr[name] = getattr(batch, name)[self.step_idx]\n hint_out_curr[name] = getattr(batch, name)[self.step_idx+1]\n if 'mask' in data_type or data_type == Type.SCALAR:\n hint_inp_curr[name] = hint_inp_curr[name].unsqueeze(-1)\n hint_out_curr[name] = hint_out_curr[name].unsqueeze(-1)\n return hint_inp_curr, hint_out_curr\n\n def process(\n self,\n batch,\n EPSILON=0,\n enforced_mask=None,\n hardcode_outputs=False,\n debug=False,\n first_n_processors=1000,\n init_last_latent=None,\n **kwargs):\n\n SIZE, STEPS_SIZE = prepare_constants(batch)\n self.hardcode_outputs = hardcode_outputs\n\n # Pytorch Geometric batches along the node dimension, but we execute\n # along the temporal (step) dimension, hence we need to transpose\n # a few tensors. 
Done by `prepare_batch`.\n if self.assert_checks:\n check_edge_index_sorted(batch.edge_index)\n if self.epoch > self.debug_epoch_threshold:\n breakpoint()\n self.zero_steps()\n batch = type(self).prepare_batch(batch)\n # When we want to calculate last step metrics/accuracies\n # we need to take into account again different termination per graph\n # hence we save last step tensors (e.g. outputs) into their\n # corresponding tensor. The function below prepares these tensors\n # (all set to zeros, except masking for computation, which are ones)\n self.set_initial_states(batch, init_last_latent=init_last_latent)\n # Prepare masking tensors (each graph does at least 1 iteration of the algo)\n self.prepare_initial_masks(batch)\n # A flag if we had a wrong graph in the batch. Used for visualisation\n # of what went wrong\n self.wrong_flag = False\n assert self.mask_cp.all(), self.mask_cp\n if self.timeit:\n st = time.time()\n node_fts_inp, edge_fts_inp = self.encode_inputs(batch)\n if self.timeit:\n print(f'encoding inputs: {time.time()-st}')\n\n while True:\n hint_inp_curr, hint_out_curr = self.get_input_output_hints(batch)\n if not self.training:\n assert (self.last_continue_logits > 0).any() or True\n\n # Some algorithms output fewer values than they take\n # so if we reuse our last step outputs, they need to be fed back in.\n if self.timeit:\n st = time.time()\n hint_inp_curr = self.get_step_input(hint_inp_curr, batch)\n if self.timeit:\n print(f'getting step input : {time.time()-st}')\n st = time.time()\n node_fts_hint, edge_fts_hint, graph_fts = self.encode_hints(hint_inp_curr, batch)\n node_fts = node_fts_inp + node_fts_hint\n edge_fts = edge_fts_inp + edge_fts_hint\n if self.timeit:\n print(f'encoding hints: {time.time()-st}')\n\n true_termination = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n\n # Does one iteration of the algo and accumulates statistics\n self.loop_body(batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=first_n_processors)\n # And calculate what graphs would execute on the next step.\n self.mask_cp, self.mask, self.mask_edges = type(self).get_masks(self.training, batch, true_termination if self.training else self.last_continue_logits, enforced_mask)\n if not self.loop_condition(\n self.mask_cp,\n STEPS_SIZE):\n break\n assert self.mask_cp.any()\n self.step_idx += 1\n\n return self.all_hint_logits, self.last_logits, self.all_masks_graph\n\n def decode(self, batch, encoded_nodes, hidden, edge_fts, graph_fts):\n catted = torch.cat((encoded_nodes, hidden), dim=1)\n outs = defaultdict(dict)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n\n if loc == Location.NODE:\n\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name](catted)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n prod = self.decoders[stage][name][3](to.max(fr+edge)).squeeze(-1)\n if data_type in [Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION] and self.use_sinkhorn:\n prod = torch.maximum(prod, self.decoders[stage][name][3](fr.max(to+edge)).squeeze(-1))\n prod = sinkhorn_normalize(batch, prod, temperature=0.1, steps=10 if self.training else 60, add_noise=self.training)\n 
outs[stage][name] = prod\n\n if loc == Location.GRAPH:\n aggr_node_fts = torch_scatter.scatter_max(catted, batch.batch, dim=0)[0]\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name][0](aggr_node_fts) + self.decoders[stage][name][1](graph_fts)\n else:\n assert False\n\n if loc == Location.EDGE:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n if data_type in (Type.CATEGORICAL, Type.MASK, Type.SCALAR):\n outs[stage][name] = fr + to + edge\n elif data_type == Type.POINTER:\n pred = fr + to + edge\n pred_2 = self.decoders[stage][name][3](catted)\n ebatch = batch.edge_index_batch\n st = batch.ptr[ebatch]\n en = batch.ptr[ebatch+1]\n dense_pred_2, mask_pred_2 = tg_utils.to_dense_batch(pred_2, batch=batch.batch)\n edge_pred_2 = dense_pred_2[ebatch]\n mask_edge_pred_2 = mask_pred_2[ebatch]\n probs_logits = self.decoders[stage][name][4](torch.maximum(pred[:, None, :], edge_pred_2)).squeeze(-1)\n probs_logits[~mask_edge_pred_2] = -1e9\n outs[stage][name] = probs_logits\n else:\n assert False\n\n return outs\n\n def encode_nodes(self, current_input, last_latent):\n return torch.cat((current_input, last_latent), dim=1)\n\n def forward(self, batch, node_fts, edge_fts, graph_fts, first_n_processors=1000):\n if torch.isnan(node_fts).any():\n breakpoint()\n assert not torch.isnan(self.last_latent).any()\n assert not torch.isnan(node_fts).any()\n if self.timeit:\n st = time.time()\n if self.timeit:\n print(f'projecting nodes: {time.time()-st}')\n\n if self.timeit:\n st = time.time()\n edge_index = batch.edge_index\n hidden, edges_hidden = self.processor(node_fts, edge_fts, graph_fts, edge_index, self.last_latent, self.last_latent_edges, first_n_processors=first_n_processors, batch=batch)\n if self.timeit:\n print(f'message passing: {time.time()-st}')\n assert not torch.isnan(hidden).any()\n if self.timeit:\n st = time.time()\n if self.triplet_reasoning:\n edge_fts = self.triplet_reductor(torch.cat([edge_fts, edges_hidden], dim=-1))\n outs = self.decode(batch, node_fts, hidden, edge_fts, graph_fts)\n if self.timeit:\n print(f'decoding hints: {time.time()-st}')\n continue_logits = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n return hidden, edges_hidden, outs, continue_logits" }, { "identifier": "LitAlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n 
self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, 
Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n 
accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % 
self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }" }, { "identifier": "CONFIGS", "path": "datasets/_configs.py", "snippet": "CONFIGS = defaultdict(lambda: _DEFAULT_CONFIG)" }, { "identifier": "cross_entropy", "path": "utils_execution.py", "snippet": "def cross_entropy(pred, softmax_idx, truth_1h, num_nodes):\n lsm_pred = torch.log(torch_geometric.utils.softmax(pred, softmax_idx, num_nodes=num_nodes)+1e-9)\n # truth_1h = F.one_hot(truth, num_nodes)\n return (-truth_1h*lsm_pred)" }, { "identifier": "check_edge_index_sorted", "path": "utils_execution.py", "snippet": "def check_edge_index_sorted(ei):\n for i in range(ei.shape[1]-1):\n assert ei[0][i] <= ei[0][i+1]\n if ei[0][i] == ei[0][i+1]:\n assert ei[1][i] < ei[1][i+1]" }, { "identifier": "prepare_constants", "path": "utils_execution.py", "snippet": "def prepare_constants(batch):\n SIZE = batch.num_nodes\n STEPS_SIZE = batch.lengths.max()-1\n return SIZE, STEPS_SIZE" }, { "identifier": "edge_one_hot_encode_pointers", "path": "utils_execution.py", "snippet": "def edge_one_hot_encode_pointers(pred, edge_index):\n pred_ei = torch.stack((torch.arange(pred.shape[0]).to(pred), pred))\n amat = torch_geometric.utils.to_dense_adj(pred_ei)\n return amat[0, edge_index[0], edge_index[1]]" }, { "identifier": "get_number_of_nodes", "path": "utils_execution.py", 
"snippet": "def get_number_of_nodes(algorithm, split):\n nns = CONFIGS[algorithm][split]['num_nodes']\n if isinstance(nns, int):\n nns = [nns]\n return nns" } ]
from collections import defaultdict
from pprint import pprint

from torch_geometric.loader import DataLoader
from pytorch_lightning.trainer.supporters import CombinedLoader

from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH
from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner
from hyperparameters import get_hyperparameters
from torch_geometric.utils import k_hop_subgraph
from datasets._configs import CONFIGS
from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes
from clrs import Type, Location, Stage

import copy
import itertools
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter
import torch_geometric
import pytorch_lightning as pl
13470
class TSPReasoner(AlgorithmReasoner):
    def __init__(self,
                 spec,
                 data,
                 latent_features,
                 algo_processor,
                 bias=True,
                 use_TF=False,
                 L1_loss=False,
                 global_termination_pool='max', #'predinet',
                 get_attention=False,
                 use_batch_norm=False,
                 transferring=False,
                 timeit=True,
                 double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(
            spec,
            data,
            latent_features,
            algo_processor,
            use_TF=use_TF,
            timeit=timeit,
            L1_loss=L1_loss,
            global_termination_pool=global_termination_pool,
            get_attention=get_attention,
            use_batch_norm=use_batch_norm,
            transferring=transferring,
            **algo_reasoner_kwargs,
        )
        self.step_idx = 0
        self.assert_checks = False
        self.debug = False
        self.debug_epoch_threshold = 1e9
        self.next_step_pool = True
        self.double_process = double_process
        self.lambda_mul = 1# 0.0001
        self.transferring = transferring

    def get_input_output_hints(self, batch):
        hint_inp_curr = dict()
        hint_out_curr = dict()
        return hint_inp_curr, hint_out_curr

    def process(
            self,
            *args,
            **kwargs):
        self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
            *args,
            first_n_processors=1000 if not self.double_process else 1,
            **kwargs)
        if self.double_process:
            self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
                *args,
                init_last_latent=self.last_latent,
                **kwargs)
        return self.all_hint_logits, self.last_logits, self.all_masks_graph


class LitTSPReasoner(LitAlgorithmReasoner):
    def __init__(self,
                 hidden_dim,
                 algo_processor,
                 dataset_class,
                 dataset_root,
                 dataset_kwargs,
                 bias=True,
                 use_TF=False,
                 ensure_permutation='greedy',
                 transferring=False,
class TSPReasoner(AlgorithmReasoner):
    def __init__(self,
                 spec,
                 data,
                 latent_features,
                 algo_processor,
                 bias=True,
                 use_TF=False,
                 L1_loss=False,
                 global_termination_pool='max', #'predinet',
                 get_attention=False,
                 use_batch_norm=False,
                 transferring=False,
                 timeit=True,
                 double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(
            spec,
            data,
            latent_features,
            algo_processor,
            use_TF=use_TF,
            timeit=timeit,
            L1_loss=L1_loss,
            global_termination_pool=global_termination_pool,
            get_attention=get_attention,
            use_batch_norm=use_batch_norm,
            transferring=transferring,
            **algo_reasoner_kwargs,
        )
        self.step_idx = 0
        self.assert_checks = False
        self.debug = False
        self.debug_epoch_threshold = 1e9
        self.next_step_pool = True
        self.double_process = double_process
        self.lambda_mul = 1# 0.0001
        self.transferring = transferring

    def get_input_output_hints(self, batch):
        hint_inp_curr = dict()
        hint_out_curr = dict()
        return hint_inp_curr, hint_out_curr

    def process(
            self,
            *args,
            **kwargs):
        self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
            *args,
            first_n_processors=1000 if not self.double_process else 1,
            **kwargs)
        if self.double_process:
            self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
                *args,
                init_last_latent=self.last_latent,
                **kwargs)
        return self.all_hint_logits, self.last_logits, self.all_masks_graph


class LitTSPReasoner(LitAlgorithmReasoner):
    def __init__(self,
                 hidden_dim,
                 algo_processor,
                 dataset_class,
                 dataset_root,
                 dataset_kwargs,
                 bias=True,
                 use_TF=False,
                 ensure_permutation='greedy',
                 transferring=False,
learning_rate=get_hyperparameters()['lr'],
3
2023-11-20 15:32:43+00:00
16k
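A brief aside on the TSPReasoner row above: its process override runs the base reasoner once and, when double_process is set, runs it a second time warm-started from the latent state produced by the first pass. The toy below sketches only that control flow under stated assumptions; ToyReasoner and its tensors are stand-ins, not the repository's AlgorithmReasoner.

# Toy illustration of the double-process pattern (assumed stand-in module, not the repo's API).
from typing import Optional

import torch
import torch.nn as nn


class ToyReasoner(nn.Module):
    def __init__(self, dim: int = 8, double_process: bool = True):
        super().__init__()
        self.step = nn.Linear(dim, dim)
        self.double_process = double_process

    def process(self, x: torch.Tensor, init_last_latent: Optional[torch.Tensor] = None) -> torch.Tensor:
        # Start from zeros unless a latent state from a previous pass is provided.
        latent = init_last_latent if init_last_latent is not None else torch.zeros_like(x)
        return torch.tanh(self.step(x + latent))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        latent = self.process(x)  # first pass, cold start
        if self.double_process:
            # second pass, seeded with the latent state of the first pass
            latent = self.process(x, init_last_latent=latent)
        return latent


print(ToyReasoner()(torch.randn(4, 8)).shape)  # torch.Size([4, 8])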
harisankar95/pathfinding3D
test/test_path.py
[ { "identifier": "DiagonalMovement", "path": "pathfinding3d/core/diagonal_movement.py", "snippet": "class DiagonalMovement:\n always = 1\n never = 2\n if_at_most_one_obstacle = 3\n only_when_no_obstacle = 4" }, { "identifier": "Grid", "path": "pathfinding3d/core/grid.py", "snippet": "class Grid:\n def __init__(\n self,\n width: int = 0,\n height: int = 0,\n depth: int = 0,\n matrix: MatrixType = None,\n grid_id: Optional[int] = None,\n inverse: bool = False,\n ):\n \"\"\"\n A grid represents the map (as 3d-list of nodes).\n\n Parameters\n ----------\n width : int, optional\n The width of the grid.\n height : int, optional\n The height of the grid.\n depth : int, optional\n The depth of the grid.\n matrix : MatrixType\n A 3D array of values (numbers or objects specifying weight)\n that determine how nodes are connected and if they are walkable.\n If no matrix is given, all nodes will be walkable.\n inverse : bool, optional\n If true, all values in the matrix that are not 0 will be considered\n walkable. Otherwise all values that are 0 will be considered walkable.\n \"\"\"\n self.width, self.height, self.depth = self._validate_dimensions(width, height, depth, matrix)\n self.nodes = (\n build_nodes(self.width, self.height, self.depth, matrix, inverse, grid_id)\n if self.is_valid_grid()\n else [[[]]]\n )\n\n def _validate_dimensions(self, width: int, height: int, depth: int, matrix: MatrixType) -> tuple:\n if matrix is not None:\n if not (\n isinstance(matrix, (list, np.ndarray))\n and len(matrix) > 0\n and len(matrix[0]) > 0\n and len(matrix[0][0]) > 0\n ):\n raise ValueError(\"Provided matrix is not a 3D structure or is empty.\")\n return len(matrix), len(matrix[0]), len(matrix[0][0])\n return width, height, depth\n\n def is_valid_grid(self) -> bool:\n return self.width > 0 and self.height > 0 and self.depth > 0\n\n def node(self, x: int, y: int, z: int) -> Optional[GridNode]:\n \"\"\"\n Get node at position\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n GridNode\n node at position\n \"\"\"\n return self.nodes[x][y][z] if self.inside(x, y, z) else None\n\n def inside(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if field position is inside map\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map\n \"\"\"\n return 0 <= x < self.width and 0 <= y < self.height and 0 <= z < self.depth\n\n def walkable(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if the tile is inside grid and if it is set as walkable\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map and walkable\n \"\"\"\n return self.inside(x, y, z) and self.nodes[x][y][z].walkable\n\n def calc_cost(self, node_a: GridNode, node_b: GridNode, weighted: bool = False) -> float:\n \"\"\"\n Get the distance between current node and the neighbor (cost)\n\n Parameters\n ----------\n node_a : GridNode\n current node\n node_b : GridNode\n neighbor node\n weighted : bool, optional\n True, if weighted algorithm is used, by default False\n\n Returns\n -------\n float\n distance between current node and the neighbor (cost)\n \"\"\"\n # Check if we have a straight, diagonal in plane or diagonal in space\n dx = node_b.x - node_a.x\n dy = node_b.y - node_a.y\n dz = node_b.z - node_a.z\n\n ng = math.sqrt(dx * dx + dy * dy + dz * dz)\n\n # weight for 
weighted algorithms\n if weighted:\n ng *= node_b.weight\n\n return ng\n\n def neighbors(\n self,\n node: GridNode,\n diagonal_movement: int = DiagonalMovement.never,\n ) -> List[GridNode]:\n \"\"\"\n Get all neighbors of one node\n\n Parameters\n ----------\n node : GridNode\n node to get neighbors from\n diagonal_movement : int, optional\n if diagonal movement is allowed\n (see enum in diagonal_movement), by default DiagonalMovement.never\n\n Returns\n -------\n list\n list of neighbor nodes\n \"\"\"\n x, y, z = node.x, node.y, node.z\n\n neighbors = []\n # current plane\n cs0 = cd0 = cs1 = cd1 = cs2 = cd2 = cs3 = cd3 = False\n # upper plane\n us0 = ud0 = us1 = ud1 = us2 = ud2 = us3 = ud3 = ut = False # ut = upper top\n # lower plane\n ls0 = ld0 = ls1 = ld1 = ls2 = ld2 = ls3 = ld3 = lb = False # lb = lower bottom\n\n # -y\n if self.walkable(x, y - 1, z):\n neighbors.append(self.nodes[x][y - 1][z])\n cs0 = True\n\n # +x\n if self.walkable(x + 1, y, z):\n neighbors.append(self.nodes[x + 1][y][z])\n cs1 = True\n\n # +y\n if self.walkable(x, y + 1, z):\n neighbors.append(self.nodes[x][y + 1][z])\n cs2 = True\n\n # -x\n if self.walkable(x - 1, y, z):\n neighbors.append(self.nodes[x - 1][y][z])\n cs3 = True\n\n # +z\n if self.walkable(x, y, z + 1):\n neighbors.append(self.nodes[x][y][z + 1])\n ut = True\n\n # -z\n if self.walkable(x, y, z - 1):\n neighbors.append(self.nodes[x][y][z - 1])\n lb = True\n\n # check for connections to other grids\n if node.connections:\n neighbors.extend(node.connections)\n\n if diagonal_movement == DiagonalMovement.never:\n return neighbors\n\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n cd0 = cs0 and cs1\n cd1 = cs1 and cs2\n cd2 = cs2 and cs3\n cd3 = cs3 and cs0\n\n us0 = cs0 and ut\n us1 = cs1 and ut\n us2 = cs2 and ut\n us3 = cs3 and ut\n\n ls0 = cs0 and lb\n ls1 = cs1 and lb\n ls2 = cs2 and lb\n ls3 = cs3 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n cd0 = cs0 or cs1\n cd1 = cs1 or cs2\n cd2 = cs2 or cs3\n cd3 = cs3 or cs0\n\n us0 = cs0 or ut\n us1 = cs1 or ut\n us2 = cs2 or ut\n us3 = cs3 or ut\n\n ls0 = cs0 or lb\n ls1 = cs1 or lb\n ls2 = cs2 or lb\n ls3 = cs3 or lb\n\n elif diagonal_movement == DiagonalMovement.always:\n cd0 = cd1 = cd2 = cd3 = True\n us0 = us1 = us2 = us3 = True\n ls0 = ls1 = ls2 = ls3 = True\n\n # +x -y\n if cd0 and self.walkable(x + 1, y - 1, z):\n neighbors.append(self.nodes[x + 1][y - 1][z])\n else:\n cd0 = False\n\n # +x +y\n if cd1 and self.walkable(x + 1, y + 1, z):\n neighbors.append(self.nodes[x + 1][y + 1][z])\n else:\n cd1 = False\n\n # -x +y\n if cd2 and self.walkable(x - 1, y + 1, z):\n neighbors.append(self.nodes[x - 1][y + 1][z])\n else:\n cd2 = False\n\n # -x -y\n if cd3 and self.walkable(x - 1, y - 1, z):\n neighbors.append(self.nodes[x - 1][y - 1][z])\n else:\n cd3 = False\n\n # -y +z\n if us0 and self.walkable(x, y - 1, z + 1):\n neighbors.append(self.nodes[x][y - 1][z + 1])\n else:\n us0 = False\n\n # +x +z\n if us1 and self.walkable(x + 1, y, z + 1):\n neighbors.append(self.nodes[x + 1][y][z + 1])\n else:\n us1 = False\n\n # +y +z\n if us2 and self.walkable(x, y + 1, z + 1):\n neighbors.append(self.nodes[x][y + 1][z + 1])\n else:\n us2 = False\n\n # -x +z\n if us3 and self.walkable(x - 1, y, z + 1):\n neighbors.append(self.nodes[x - 1][y][z + 1])\n else:\n us3 = False\n\n # -y -z\n if ls0 and self.walkable(x, y - 1, z - 1):\n neighbors.append(self.nodes[x][y - 1][z - 1])\n else:\n ls0 = False\n\n # +x -z\n if ls1 and self.walkable(x + 1, y, z - 1):\n 
neighbors.append(self.nodes[x + 1][y][z - 1])\n else:\n ls1 = False\n\n # +y -z\n if ls2 and self.walkable(x, y + 1, z - 1):\n neighbors.append(self.nodes[x][y + 1][z - 1])\n else:\n ls2 = False\n\n # -x -z\n if ls3 and self.walkable(x - 1, y, z - 1):\n neighbors.append(self.nodes[x - 1][y][z - 1])\n else:\n ls3 = False\n\n # remaining daigonal neighbors\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n ud0 = cs0 and cd0 and cs1 and us0 and us1 and ut\n ud1 = cs1 and cd1 and cs2 and us1 and us2 and ut\n ud2 = cs2 and cd2 and cs3 and us2 and us3 and ut\n ud3 = cs3 and cd3 and cs0 and us3 and us0 and ut\n\n ld0 = cs0 and cd0 and cs1 and ls0 and ls1 and lb\n ld1 = cs1 and cd1 and cs2 and ls1 and ls2 and lb\n ld2 = cs2 and cd2 and cs3 and ls2 and ls3 and lb\n ld3 = cs3 and cd3 and cs0 and ls3 and ls0 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n ud0 = sum([cs0, cd0, cs1, us0, us1, ut]) >= 5\n ud1 = sum([cs1, cd1, cs2, us1, us2, ut]) >= 5\n ud2 = sum([cs2, cd2, cs3, us2, us3, ut]) >= 5\n ud3 = sum([cs3, cd3, cs0, us3, us0, ut]) >= 5\n\n ld0 = sum([cs0, cd0, cs1, ls0, ls1, lb]) >= 5\n ld1 = sum([cs1, cd1, cs2, ls1, ls2, lb]) >= 5\n ld2 = sum([cs2, cd2, cs3, ls2, ls3, lb]) >= 5\n ld3 = sum([cs3, cd3, cs0, ls3, ls0, lb]) >= 5\n\n elif diagonal_movement == DiagonalMovement.always:\n ud0 = ud1 = ud2 = ud3 = True\n ld0 = ld1 = ld2 = ld3 = True\n\n # +x -y +z\n if ud0 and self.walkable(x + 1, y - 1, z + 1):\n neighbors.append(self.nodes[x + 1][y - 1][z + 1])\n\n # +x +y +z\n if ud1 and self.walkable(x + 1, y + 1, z + 1):\n neighbors.append(self.nodes[x + 1][y + 1][z + 1])\n\n # -x +y +z\n if ud2 and self.walkable(x - 1, y + 1, z + 1):\n neighbors.append(self.nodes[x - 1][y + 1][z + 1])\n\n # -x -y +z\n if ud3 and self.walkable(x - 1, y - 1, z + 1):\n neighbors.append(self.nodes[x - 1][y - 1][z + 1])\n\n # +x -y -z\n if ld0 and self.walkable(x + 1, y - 1, z - 1):\n neighbors.append(self.nodes[x + 1][y - 1][z - 1])\n\n # +x +y -z\n if ld1 and self.walkable(x + 1, y + 1, z - 1):\n neighbors.append(self.nodes[x + 1][y + 1][z - 1])\n\n # -x +y -z\n if ld2 and self.walkable(x - 1, y + 1, z - 1):\n neighbors.append(self.nodes[x - 1][y + 1][z - 1])\n\n # -x -y -z\n if ld3 and self.walkable(x - 1, y - 1, z - 1):\n neighbors.append(self.nodes[x - 1][y - 1][z - 1])\n\n return neighbors\n\n def cleanup(self):\n \"\"\"\n Cleanup grid\n \"\"\"\n for x_nodes in self.nodes:\n for y_nodes in x_nodes:\n for z_node in y_nodes:\n z_node.cleanup()" }, { "identifier": "GridNode", "path": "pathfinding3d/core/node.py", "snippet": "class GridNode(Node):\n \"\"\"\n basic node, saves X, Y and Z coordinates on some grid and determine if\n it is walkable.\n \"\"\"\n\n # Coordinates\n x: int = 0\n y: int = 0\n z: int = 0\n\n # Wether this node can be walked through.\n walkable: bool = True\n\n # used for weighted algorithms\n weight: float = 1.0\n\n # grid_id is used if we have more than one grid,\n # normally we just count our grids by number\n # but you can also use a string here.\n # Set it to None if you only have one grid.\n grid_id: Optional[int] = None\n\n connections: Optional[List] = None\n\n identifier: Optional[Tuple] = None\n\n def __post_init__(self):\n super().__init__()\n # for heap\n self.identifier: Tuple = (\n (self.x, self.y, self.z) if self.grid_id is None else (self.x, self.y, self.z, self.grid_id)\n )\n\n def __iter__(self):\n yield self.x\n yield self.y\n yield self.z\n if self.grid_id is not None:\n yield self.grid_id\n\n def connect(self, other_node: 
\"GridNode\"):\n if not self.connections:\n self.connections = [other_node]\n else:\n self.connections.append(other_node)" }, { "identifier": "AStarFinder", "path": "pathfinding3d/finder/a_star.py", "snippet": "class AStarFinder(Finder):\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using A* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n if not heuristic:\n if diagonal_movement == DiagonalMovement.never:\n self.heuristic = manhattan\n else:\n # When diagonal movement is allowed the manhattan heuristic is\n # not admissible it should be octile instead\n self.heuristic = octile\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n open_value: int = 1,\n backtrace_by=None,\n ) -> Optional[List[GridNode]]:\n \"\"\"\n Find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n Optional[List[GridNode]]\n path\n \"\"\"\n\n # pop node with minimum 'f' value\n node = open_list.pop_node()\n node.closed = True\n\n # if reached the end position, construct the path and return it\n # (ignored for bi-directional a*, there we look for a neighbor that is\n # part of the oncoming path)\n if not backtrace_by and node == end:\n return backtrace(end)\n\n # get neighbors of the current node\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed:\n # already visited last minimum f value\n continue\n if backtrace_by and neighbor.opened == backtrace_by:\n # found the oncoming path\n if backtrace_by == BY_END:\n return bi_backtrace(node, neighbor)\n\n return bi_backtrace(neighbor, node)\n\n # check if the neighbor has not been inspected yet, or\n # can be reached with smaller cost from the current node\n self.process_node(grid, neighbor, node, end, open_list, open_value)\n\n # the end has not been reached (yet) keep the find_path loop running\n return None\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the A* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n\n start.g = 0\n start.f = 0\n return super().find_path(start, end, grid)" }, { "identifier": "BestFirst", "path": "pathfinding3d/finder/best_first.py", "snippet": "class BestFirst(AStarFinder):\n \"\"\"\n Similar 
to the default A* algorithm from a_star.\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using BestFirst algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n self.weighted = False\n\n def apply_heuristic(self, node_a: GridNode, node_b: GridNode, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : GridNode\n first node\n node_b : GridNode\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n heuristic value\n \"\"\"\n return super().apply_heuristic(node_a, node_b, heuristic) * 1000000" }, { "identifier": "BiAStarFinder", "path": "pathfinding3d/finder/bi_a_star.py", "snippet": "class BiAStarFinder(AStarFinder):\n \"\"\"\n Similar to the default A* algorithm from a_star.\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Bi-A* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. 
amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the A* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n (can be a list of grids)\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n start_open_list = SimpleHeap(start, grid)\n start.g = 0\n start.f = 0\n start.opened = BY_START\n\n end_open_list = SimpleHeap(end, grid)\n end.g = 0\n end.f = 0\n end.opened = BY_END\n\n while len(start_open_list) > 0 and len(end_open_list) > 0:\n self.runs += 1\n self.keep_running()\n path = self.check_neighbors(\n start,\n end,\n grid,\n start_open_list,\n open_value=BY_START,\n backtrace_by=BY_END,\n )\n if path:\n return path, self.runs\n\n self.runs += 1\n self.keep_running()\n path = self.check_neighbors(\n end,\n start,\n grid,\n end_open_list,\n open_value=BY_END,\n backtrace_by=BY_START,\n )\n if path:\n return path, self.runs\n\n # failed to find path\n return [], self.runs" }, { "identifier": "BreadthFirstFinder", "path": "pathfinding3d/finder/breadth_first.py", "snippet": "class BreadthFirstFinder(Finder):\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Breadth First algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. 
amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n weighted=False,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n if not diagonal_movement:\n self.diagonalMovement = DiagonalMovement.never\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n ) -> List[GridNode]:\n \"\"\"\n Find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n List[GridNode]\n path\n \"\"\"\n node = open_list.pop_node()\n node.closed = True\n\n if node == end:\n return backtrace(end)\n\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed or neighbor.opened:\n continue\n\n open_list.push_node(neighbor)\n neighbor.opened = True\n neighbor.parent = node" }, { "identifier": "DijkstraFinder", "path": "pathfinding3d/finder/dijkstra.py", "snippet": "class DijkstraFinder(AStarFinder):\n def __init__(\n self,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Dijkstra algorithm\n\n Parameters\n ----------\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=null,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n def apply_heuristic(self, node_a: Node, node_b: Node, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : Node\n first node\n node_b : Node\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n 0.0\n \"\"\"\n return 0.0" }, { "identifier": "ExecutionRunsException", "path": "pathfinding3d/finder/finder.py", "snippet": "class ExecutionRunsException(Exception):\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "ExecutionTimeException", "path": "pathfinding3d/finder/finder.py", "snippet": "class ExecutionTimeException(Exception):\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "IDAStarFinder", "path": "pathfinding3d/finder/ida_star.py", "snippet": "class IDAStarFinder(Finder):\n \"\"\"\n Iterative Deeping A Star (IDA*) path-finder.\n\n Recursion based on:\n http://www.apl.jhu.edu/~hall/AI-Programming/IDA-Star.html\n\n Path retracing based on:\n V. Nageshwara Rao, Vipin Kumar and K. 
Ramesh\n \"A Parallel Implementation of Iterative-Deeping-A*\", January 1987.\n ftp://ftp.cs.utexas.edu/.snapshot/hourly.1/pub/AI-Lab/tech-reports/\n UT-AI-TR-87-46.pdf\n\n based on the JavaScript implementation by Gerard Meier\n (www.gerardmeier.com)\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n track_recursion: bool = True,\n ):\n \"\"\"\n Find shortest path using IDA* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n track_recursion : bool\n if we should track recursion\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n weighted=False,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n self.track_recursion = track_recursion\n if not heuristic:\n if diagonal_movement == DiagonalMovement.never:\n self.heuristic = manhattan\n else:\n # When diagonal movement is allowed the manhattan heuristic is\n # not admissible it should be octile instead\n self.heuristic = octile\n\n self.nodes_visited: int\n\n def search(\n self,\n node: GridNode,\n g: float,\n cutoff: float,\n path: List[GridNode],\n depth: int,\n end: GridNode,\n grid: Grid,\n ) -> Union[float, GridNode]:\n \"\"\"\n Recursive IDA* search implementation\n\n Parameters\n ----------\n node : GridNode\n current node\n g : float\n cost from start to current node\n cutoff : float\n cutoff cost\n path : List[GridNode]\n path\n depth : int\n current depth\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Union[float, GridNode]\n cutoff cost or end node\n \"\"\"\n self.runs += 1\n self.keep_running()\n\n self.nodes_visited += 1\n\n f = g + self.apply_heuristic(node, end) * self.weight\n\n # We've searched too deep for this iteration.\n if f > cutoff:\n return f\n\n if node == end:\n if len(path) < depth:\n path += [None] * (depth - len(path) + 1)\n path[depth] = node\n return node\n\n neighbors = self.find_neighbors(grid, node)\n\n # Sort the neighbors, gives nicer paths. But, this deviates\n # from the original algorithm - so I left it out\n # TODO: make this an optional parameter\n # def sort_neighbors(a, b):\n # return self.apply_heuristic(a, end) - \\\n # self.apply_heuristic(b, end)\n # sorted(neighbors, sort_neighbors)\n min_t = float(\"inf\")\n for neighbor in neighbors:\n if self.track_recursion:\n # Retain a copy for visualisation. 
Due to recursion, this\n # node may be part of other paths too.\n neighbor.retain_count += 1\n neighbor.tested = True\n\n t = self.search(\n neighbor,\n g + grid.calc_cost(node, neighbor),\n cutoff,\n path,\n depth + 1,\n end,\n grid,\n )\n\n if isinstance(t, GridNode):\n if len(path) < depth:\n path += [None] * (depth - len(path) + 1)\n path[depth] = node\n return t\n\n # Decrement count, then determine whether it's actually closed.\n if self.track_recursion:\n neighbor.retain_count -= 1\n if neighbor.retain_count == 0:\n neighbor.tested = False\n\n if t < min_t:\n min_t = t\n\n return min_t\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the IDA* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n self.nodes_visited = 0 # for statistics\n\n # initial search depth, given the typical heuristic contraints,\n # there should be no cheaper route possible.\n cutoff = self.apply_heuristic(start, end)\n\n while True:\n path = []\n\n # search till cut-off depth:\n t = self.search(start, 0, cutoff, path, 0, end, grid)\n\n if isinstance(t, bool) and not t:\n # only when an error occured we return \"False\"\n break\n\n # If t is a node, it's also the end node. Route is now\n # populated with a valid path to the end node.\n if isinstance(t, GridNode):\n return (\n [(node.x, node.y, node.z, node.grid_id) for node in path],\n self.runs,\n )\n\n # Try again, this time with a deeper cut-off. The t score\n # is the closest we got to the end node.\n cutoff = t\n\n return [], self.runs" }, { "identifier": "MinimumSpanningTree", "path": "pathfinding3d/finder/msp.py", "snippet": "class MinimumSpanningTree(Finder):\n \"\"\"\n Minimum Spanning Tree implementation by Brad Beattie\n (see https://github.com/brean/python-pathfinding/issues/18)\n\n The wikipedia page has a nice description about MSP:\n https://en.wikipedia.org/wiki/Minimum_spanning_tree\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.heuristic = heuristic.null\n\n def tree(self, grid: Grid, start: GridNode) -> List:\n \"\"\"\n Returns a list of nodes that are part of the minimum spanning tree\n of the grid.\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n start : GridNode\n start node\n\n Returns\n -------\n List\n \"\"\"\n\n return list(self.itertree(grid, start))\n\n def itertree(self, grid: Grid, start: GridNode):\n \"\"\"\n Returns a generator that yields nodes that are part of the minimum\n spanning tree of the grid.\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n start : GridNode\n start node\n \"\"\"\n # Finder.process_node requires an end node, which we don't have.\n # The following value tricks the call to Finder.apply_heuristic.\n # Though maybe we want to generate a limited spanning tree that\n # trends in a certain direction? 
In which case we'd want a more\n # nuanced solution.\n end = namedtuple(\"FakeNode\", [\"x\", \"y\", \"z\"])(-1, -1, -1)\n\n start.opened = True\n\n open_list = SimpleHeap(start, grid)\n\n while len(open_list) > 0:\n self.runs += 1\n self.keep_running()\n\n node = open_list.pop_node()\n node.closed = True\n yield node\n\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if not neighbor.closed:\n self.process_node(grid, neighbor, node, end, open_list, open_value=True)\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the Minimum Spanning\n Tree algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n for node in self.itertree(grid, start):\n if node == end:\n path = deque()\n step = node\n while step.parent:\n path.appendleft(step)\n step = step.parent\n path.appendleft(step)\n return path, self.runs\n\n return [], self.runs" } ]
import numpy as np import pytest from pathfinding3d.core.diagonal_movement import DiagonalMovement from pathfinding3d.core.grid import Grid from pathfinding3d.core.node import GridNode from pathfinding3d.finder.a_star import AStarFinder from pathfinding3d.finder.best_first import BestFirst from pathfinding3d.finder.bi_a_star import BiAStarFinder from pathfinding3d.finder.breadth_first import BreadthFirstFinder from pathfinding3d.finder.dijkstra import DijkstraFinder from pathfinding3d.finder.finder import ExecutionRunsException, ExecutionTimeException from pathfinding3d.finder.ida_star import IDAStarFinder from pathfinding3d.finder.msp import MinimumSpanningTree
11,215
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 5 def test_max_runs(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT, max_runs=3)
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 5 def test_max_runs(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT, max_runs=3)
with pytest.raises(ExecutionRunsException):
8
2023-11-21 10:14:12+00:00
16k
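The record above crops `test_max_runs` just before the assertion, and its gold `next_line` is the `pytest.raises` guard shown in that field. A minimal sketch of how the completion reads in context, assuming an all-ones stand-in matrix and an assumed call inside the `with` block (the crop ends before both), is:

import numpy as np
import pytest

from pathfinding3d.core.diagonal_movement import DiagonalMovement
from pathfinding3d.core.grid import Grid
from pathfinding3d.finder.a_star import AStarFinder
from pathfinding3d.finder.finder import ExecutionRunsException

# Simplified stand-in for the record's SIMPLE_MATRIX: every cell walkable.
SIMPLE_MATRIX = np.ones((5, 5, 5))


def test_max_runs_sketch():
    grid = Grid(matrix=SIMPLE_MATRIX)
    start = grid.node(0, 0, 0)
    end = grid.node(4, 4, 0)
    finder = AStarFinder(diagonal_movement=DiagonalMovement.always, max_runs=3)
    # The record's gold next_line: exceeding max_runs should raise.
    with pytest.raises(ExecutionRunsException):
        finder.find_path(start, end, grid)  # assumed continuation; the crop stops before this call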
yuukawahiroshi/ddb-tools
extract_wav.py
[ { "identifier": "DDIModel", "path": "utils/ddi_utils.py", "snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.art_data = {}\n self.vqm_data = {}\n self.offset_map = {}\n\n def read(self, temp_path: Optional[str] = None, cat_only: bool = False):\n if temp_path or cat_only:\n import yaml\n\n if cat_only:\n with open(os.path.join(temp_path, 'sta.yml'), mode='r',\n encoding='utf-8') as sta_f:\n self.sta_data = yaml.load(sta_f)\n with open(os.path.join(temp_path, 'art.yml'), mode='r',\n encoding='utf-8') as art_f:\n self.art_data = yaml.load(art_f)\n vqm_data = None\n if os.path.isfile(os.path.join(temp_path, 'vqm.yml')):\n with open(os.path.join(temp_path, 'vqm.yml'), mode='r',\n encoding='utf-8') as vqm_f:\n self.vqm_data = yaml.load(vqm_f)\n else:\n self.ddi_data = io.BytesIO(self.ddi_bytes)\n # DBSe\n # Tonio.ddi has no DBSe block\n \n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 0\n # assert ddi_data.read(4).decode() == 'DBSe'\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 1\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 3\n\n # PHDC\n phdc_offset = self.ddi_bytes.find(b'PHDC')\n if phdc_offset >= 0:\n self.ddi_data.seek(phdc_offset)\n self.phdc_data = self.read_phdc()\n\n self.offset_map['phdc'] = [phdc_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'phdc.yml'), mode='w',\n encoding='utf-8') as phdc_f:\n phdc_str = yaml.dump(self.phdc_data, default_flow_style=False,\n sort_keys=False)\n phdc_f.write(phdc_str)\n\n # TDB\n tdb_offset = self.ddi_bytes.find(b'\\xFF'*8+b'TDB ')\n if tdb_offset >= 0:\n self.ddi_data.seek(tdb_offset)\n self.tdb_data = self.read_tdb()\n self.offset_map['tdb'] = [tdb_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'tdb.yml'), mode='w',\n encoding='utf-8') as tdb_f:\n tdb_str = yaml.dump(self.tdb_data, default_flow_style=False,\n sort_keys=False)\n tdb_f.write(tdb_str)\n\n # DBV\n dbv_offset = self.ddi_bytes.find(b'\\x00'*8+b'DBV ')\n self.ddi_data.seek(dbv_offset)\n self.read_dbv()\n self.offset_map['dbv'] = [dbv_offset, self.ddi_data.tell()]\n\n # STA\n sta_offset = self.ddi_bytes.find(b'\\x00'*8+b'STA ')\n sta_offset = reverse_search(self.ddi_bytes, b'ARR ', sta_offset) - 8\n self.ddi_data.seek(sta_offset)\n self.sta_data = self.read_sta()\n self.offset_map['sta'] = [sta_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'sta.yml'), mode='w',\n encoding='utf-8') as sta_f:\n sta_str = yaml.dump(self.sta_data, default_flow_style=False,\n sort_keys=False)\n sta_f.write(sta_str)\n\n # ART\n art_offset = self.ddi_bytes.find(b'\\x00'*8+b'ART ')\n art_offset = reverse_search(self.ddi_bytes, b'ARR ', art_offset) - 8\n self.ddi_data.seek(art_offset)\n self.art_data = self.read_art()\n self.offset_map['art'] = [art_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'art.yml'), mode='w',\n encoding='utf-8') as art_f:\n art_str = yaml.dump(self.art_data, default_flow_style=False,\n sort_keys=False)\n art_f.write(art_str)\n\n # VQM\n vqm_offset = self.ddi_bytes.find(b'\\xFF'*8+b'VQM ')\n self.vqm_data = None\n if vqm_offset != -1:\n self.ddi_data.seek(vqm_offset)\n self.vqm_data = self.read_vqm()\n self.offset_map['vqm'] = [vqm_offset, self.ddi_data.tell()]\n\n if 
temp_path:\n with open(os.path.join(temp_path, 'vqm.yml'), mode='w',\n encoding='utf-8') as vqm_f:\n vqm_str = yaml.dump(self.vqm_data, default_flow_style=False,\n sort_keys=False)\n vqm_f.write(vqm_str)\n \n \n # DDI convert\n self.ddi_data_dict: dict[str, dict[str, list[artp_type]]] = {\n 'sta': {},\n 'art': {},\n }\n\n if self.vqm_data is not None:\n self.ddi_data_dict = {\n 'vqm': {},\n 'sta': {},\n 'art': {},\n }\n vqm_dict = []\n for idx, vqmp in self.vqm_data.items():\n vqm_dict.append({'snd': vqmp['snd'], 'epr': vqmp['epr'], 'pitch': vqmp['pitch1']})\n self.ddi_data_dict['vqm'] = vqm_dict\n\n sta_dict: dict[str, list[artp_type]] = {}\n for stau in self.sta_data.values():\n stau_dict: list[artp_type] = []\n for idx, stap in stau['stap'].items():\n stau_dict.append({'snd': stap['snd'], 'epr': stap['epr'], 'pitch': stap['pitch1']})\n sta_dict[stau['phoneme']] = stau_dict\n self.ddi_data_dict['sta'] = {key: sta_dict[key]\n for key in sorted(sta_dict.keys())}\n\n art_dict: dict[str, list[artp_type]] = {}\n for art in self.art_data.values():\n if 'artu' in art.keys():\n for artu in art['artu'].values():\n key = art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n if 'art' in art.keys():\n for sub_art in art['art'].values():\n sub_art: art_type\n if 'artu' in sub_art.keys():\n for artu in sub_art['artu'].values():\n key = art['phoneme']+' '+sub_art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n self.ddi_data_dict['art'] = {key: art_dict[key]\n for key in sorted(art_dict.keys())}\n\n\n def save(self, dst_path: Optional[str] = None):\n import yaml\n\n with open(os.path.join(dst_path, 'ddi.yml'), mode='w', encoding='utf-8') as ddi_f:\n ddi_str = yaml.dump(self.ddi_data_dict, default_flow_style=False,\n sort_keys=False)\n ddi_f.write(ddi_str)\n\n\n def read_phdc(self):\n phdc_data: dict[str, dict[int, list[str]]\n | dict[str, dict[int, str]]\n | dict[str, list[str]]\n | str]\n phdc_data = {}\n # PHDC\n phoneme_data: dict[str, list[str]] = {\"voiced\": [], \"unvoiced\": []}\n assert self.ddi_data.read(4).decode() == 'PHDC'\n phdc_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 4\n phoneme_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phoneme_num):\n bytes_str = self.ddi_data.read(0x1F)\n assert bytes_str[-1] in [0, 1]\n real_data = bytes_str[:-1].decode().strip('\\x00')\n\n phoneme_type = \"voiced\" if bytes_str[-1] == 0 else \"unvoiced\"\n\n phoneme_data[phoneme_type].append(real_data)\n phdc_data['phoneme'] = phoneme_data\n\n # PHG2\n phg2_data: dict[str, dict[int, str]] = {}\n assert self.ddi_data.read(4).decode() == 'PHG2'\n phg2_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phg2_epr_guide_num):\n phg2_key = read_str(self.ddi_data)\n phg2_data[phg2_key] = {}\n temp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(temp_num):\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_data[phg2_key][idx] = read_str(self.ddi_data)\n assert int.from_bytes(self.ddi_data.read(4), 
byteorder='little') == 0\n phdc_data['phg2'] = phg2_data\n\n # epr_guide\n epr_guide_data: dict[str, list[str]] = {}\n epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_guide_size = phdc_size-phg2_size-0x10-0x1F*phoneme_num-4\n epr_guide_bytes = self.ddi_data.read(epr_guide_size)\n offset = 0\n for i in range(epr_guide_num):\n key = epr_guide_bytes[offset:offset+0x20].decode().strip('\\x00')\n assert int.from_bytes(epr_guide_bytes[offset+0x20:offset+0x24],\n byteorder='little') == 4\n epr_guide_data[key] = []\n offset += 0x24\n while(offset < len(epr_guide_bytes) and epr_guide_bytes[offset] == 0):\n if epr_guide_bytes[offset+7] == 0x40:\n value = epr_guide_bytes[offset:offset + 7]\n start_idx = 0\n for i in range(7):\n if value[i] != 0:\n start_idx = i\n break\n # TODO: Need to check carefully. \"b'XXX'\" and we only take XXX\n value = bytes_to_str(value[start_idx:])\n epr_guide_data[key].append(value)\n else:\n assert int.from_bytes(epr_guide_bytes[offset:offset + 8],\n byteorder='little') == 0\n epr_guide_data[key].append('')\n offset += 8\n assert offset == len(epr_guide_bytes)\n phdc_data['epr_guide'] = epr_guide_data\n\n # hash string\n # phdc_data['hash'] = self.ddi_data.read(0x20).decode()\n # assert int.from_bytes(self.ddi_data.read(0xE0), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n\n return phdc_data\n\n\n def read_tdb(self) -> dict[int, str]:\n tdb_data: dict[int, str] = {}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TDB '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi (B9 13 10 00)\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n tmm_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n str_list = ['pitch', 'dynamics', 'opening']\n for i in range(tmm_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TMM '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # print(i, idx)\n str_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert str_num == 3\n for j in range(str_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 0\n assert read_str(self.ddi_data) == str_list[j]\n phoneme = read_str(self.ddi_data)\n tdb_data[idx] = phoneme\n assert read_str(self.ddi_data) == 'timbre'\n return tdb_data\n\n\n def read_dbv(self) -> None:\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'DBV '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # 4 for AVANNA, 5 for others?\n\n\n def read_sta(self) -> dict[int, artu_type]:\n sta_data: dict[int, artu_type] = {}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n\n assert self.ddi_data.read(4).decode() == 'STA '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') 
# == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n stau_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(stau_num):\n stau_data: artu_type = {'phoneme': '', 'stap': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n stau_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(8) == b'\\xFF'*8\n stap_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(stap_num):\n stap_data: artp_type = {'snd': '', 'snd_length': '', 'epr': []}\n _pos = self.ddi_data.tell()\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n stap_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n stap_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n \n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0x3D\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'SND'\n stap_data['snd_length'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'EpR'\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n stap_data['epr'] = epr_list\n stap_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n stap_data['snd'] = f'{snd_offset_pos:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n\n _pos = self.ddi_data.tell()\n stap_data['unknown4'] = bytes_to_str(self.ddi_data.read(0x10))\n stap_idx = read_str(self.ddi_data)\n assert stap_idx not in stau_data['stap'].keys()\n stau_data['stap'][stap_idx] = stap_data\n stau_data['stap'] = {k: stau_data['stap'][k]\n for k in 
sorted(stau_data['stap'].keys())}\n stau_data['phoneme'] = read_str(self.ddi_data)\n sta_data[stau_idx] = stau_data\n sta_data = {k: sta_data[k] for k in sorted(sta_data.keys())}\n assert read_str(self.ddi_data) == 'normal'\n assert read_str(self.ddi_data) == 'stationary'\n return sta_data\n\n\n def read_art(self) -> dict[int, art_type]:\n total_art_data: dict[int, art_type] = {}\n int.from_bytes(self.ddi_data.read(8), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') != 0\n while(True):\n start = self.ddi_data.read(8)\n if not (start in [b'\\x00'*8, b'\\xFF'*8]):\n offset = self.ddi_data.tell()-8\n self.ddi_data.seek(offset)\n assert read_str(self.ddi_data) == 'articulation'\n break\n assert self.ddi_data.read(4).decode() == 'ART '\n art_idx, art_data = self.read_art_block()\n total_art_data[art_idx] = art_data\n total_art_data = {key: total_art_data[key]\n for key in sorted(total_art_data.keys())}\n return total_art_data\n\n\n def read_art_block(self) -> tuple[int, art_type]:\n art_data: art_type = {'phoneme': '', 'artu': {}, 'art': {}}\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n art_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artu_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n i = -1\n for i in range(artu_num):\n artu_data: artu_type = {'phoneme': '', 'artp': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n block_type = self.ddi_data.read(4).decode()\n if block_type == 'ART ':\n sub_art_idx, sub_art_data = self.read_art_block()\n art_data['art'][sub_art_idx] = sub_art_data\n continue\n else:\n assert block_type == 'ARTu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n artu_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # TODO: why to be 1?\n assert int.from_bytes(self.ddi_data.read(8),\n byteorder='little') in [0, 1]\n self.ddi_data.read(4)\n assert self.ddi_data.read(4) == b'\\xFF'*4\n artp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(artp_num):\n artp_data: artp_type = {'snd': '', 'snd_unknown': '', 'epr': []}\n dev_artp_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['dev_artp'] = f'{dev_artp_offset:0>8x}'\n assert self.ddi_data.read(4).decode() == 'ARTp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n artp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n artp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # print(f'art {i:4d} {j:4d} {unknown}')\n # if env['unknown'] is None:\n # env['unknown'] = unknown\n # else:\n # assert env['unknown'] == unknown\n assert int.from_bytes(self.ddi_data.read(4), 
byteorder='little') == 2\n # TODO: This doesn't seem to be an index actually\n artp_idx = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt1 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt1'] = f'{snd_len_empt1:08x}'\n assert read_str(self.ddi_data) == 'SND'\n snd_len_sta = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artp_data['snd_len_sta'] = f'{snd_len_sta:08x}'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt2 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt2'] = f'{snd_len_empt2:08x}'\n assert read_str(self.ddi_data) == 'EpR'\n loc = self.ddi_data.tell()\n try:\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n except AssertionError:\n self.ddi_data.seek(loc)\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n \n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['snd'] = f'{snd_offset_pos:08x}={snd_offset-0x12:016x}_{snd_identifier:08x}'\n\n snd_offset2_pos = self.ddi_data.tell()\n snd_offset2 = int.from_bytes(self.ddi_data.read(8), byteorder='little') # == snd_offset+0x800 Exception: Tonio.ddi (0)\n artp_data['snd_start'] = f'{snd_offset2_pos:08x}={snd_offset2-0x12:016x}_{snd_identifier:08x}'\n\n ddi_bytes: bytes = self.ddi_bytes[self.ddi_data.tell():self.ddi_data.tell() + 1024]\n align_length = ddi_bytes.find(b'default')-4\n align_bytes = self.ddi_data.read(align_length)\n frame_align = []\n if align_length > 4:\n align_group_num = int.from_bytes(align_bytes[0:4], byteorder='little')\n # In V3 format, each group has int32 * 4 bytes\n align_bytes = align_bytes[4:]\n align_io = io.BytesIO(align_bytes)\n for _ in range(0, align_group_num):\n frame_align_group = {\n \"start\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"start2\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end2\": int.from_bytes(align_io.read(4), byteorder='little'),\n }\n frame_align.append(frame_align_group)\n else: # V2 format\n frame_align_group = []\n for i in range(0, len(align_bytes), 4):\n frame_align_group.append(int.from_bytes(align_bytes[i:i+4], byteorder='little'))\n frame_align.append(frame_align_group)\n artp_data['frame_align'] = frame_align\n \n assert 
read_str(self.ddi_data) == 'default'\n\n assert artp_idx not in artu_data['artp'].keys()\n artu_data['artp'][artp_idx] = artp_data\n artu_data['artp'] = {k: artu_data['artp'][k]\n for k in sorted(artu_data['artp'].keys())}\n artu_data['phoneme'] = read_str(self.ddi_data)\n art_data['artu'][artu_idx] = artu_data\n art_data['artu'] = {k: art_data['artu'][k]\n for k in sorted(art_data['artu'].keys())}\n art_data['art'] = {k: art_data['art'][k]\n for k in sorted(art_data['art'].keys())}\n art_data['phoneme'] = read_str(self.ddi_data)\n if len(art_data['art'].keys()) == 0:\n del art_data['art']\n if len(art_data['artu'].keys()) == 0:\n del art_data['artu']\n return art_idx, art_data\n\n\n def read_vqm(self) -> dict[int, artp_type]:\n vqm_data: dict[int, artp_type] = {}\n\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQM '\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert self.ddi_data.read(8) == b'\\xFF'*8\n\n assert self.ddi_data.read(4).decode() == 'VQMu'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n\n vqmp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == vqmp_num\n for i in range(vqmp_num):\n vqmp_data = {'snd': '', 'epr': []}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQMp'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n vqmp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n vqmp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # TODO: that may not be same as env['unknown']\n vqmp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4) == b'\\xFF'*4\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n ddi_epr_offset = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n epr_list.append(f'{ddi_epr_offset:0>8x}={epr_offset:0>8x}')\n vqmp_data['epr'] = epr_list\n vqmp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n ddi_snd_offset = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n vqmp_data['snd'] = f'{ddi_snd_offset:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n assert self.ddi_data.read(0x10) == b'\\xFF'*0x10\n vqmp_idx = int(read_str(self.ddi_data))\n vqm_data[vqmp_idx] = vqmp_data\n assert read_str(self.ddi_data) == 'GROWL'\n assert read_str(self.ddi_data) == 'vqm'\n return vqm_data" }, { 
"identifier": "bytes_to_str", "path": "utils/ddi_utils.py", "snippet": "def bytes_to_str(data: bytes, add_spaces: bool = True) -> str:\n if add_spaces:\n return ' '.join([f'{piece:02x}' for piece in list(data)])\n else:\n return ''.join([f'{piece:02x}' for piece in list(data)])" }, { "identifier": "stream_reverse_search", "path": "utils/ddi_utils.py", "snippet": "def stream_reverse_search(data: io.BufferedReader, search: bytes, offset: int, limit: int = -1) -> int:\n if limit == -1:\n limit = 1024 * 1024 * 10\n offset -= len(search)\n for i in range(offset, 0, -1):\n data.seek(i)\n if data.read(len(search)) == search:\n return i\n if offset - i > limit:\n break\n\n return -1" } ]
import argparse import math import os import re import time import wave from typing import Sequence, TypedDict from utils.ddi_utils import DDIModel, bytes_to_str, stream_reverse_search
11,875
snd_pos_list: list[int] = [] # Read DDI file print("Reading DDI...") with open(ddi_path, "rb") as f: ddi_bytes = f.read() ddi_model = DDIModel(ddi_bytes) ddi_model.read() # Extract snd files from DDB ddb_size = os.path.getsize(ddb_path) with open(ddb_path, "rb") as ddb_f: # Dump articulation art_list: list[tuple[list, dict]] = [] for idx, art_item in ddi_model.art_data.items(): if "artu" in art_item: # Triphoneme for idx, artu_item in art_item["artu"].items(): if "artp" in artu_item: for idx, artp_item in artu_item["artp"].items(): phonemes = [art_item["phoneme"], artu_item["phoneme"]] art_list.append((phonemes, artp_item)) if "artu" in artu_item: for idx, artu2_item in artu_item["artu"].items(): if "artp" in artu2_item: for idx, artp_item in artu2_item["artp"].items(): phonemes = [ art_item["phoneme"], artu_item["phoneme"], artu2_item["phoneme"]] art_list.append((phonemes, artp_item)) for art_item in art_list: phonemes = art_item[0] art_item = art_item[1] _, t = art_item["snd"].split("=") snd_offset, _ = t.split("_") snd_offset = int(snd_offset, 16) pitch = art_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "wav")) ddb_f.seek(snd_offset) snd_ident = ddb_f.read(4) if snd_ident != start_encode: print( f'Error: SND header not found for articulation [{" ".join(phonemes)}] {i}') continue # Read snd header snd_length = int.from_bytes(ddb_f.read(4), byteorder='little') snd_frame_rate = int.from_bytes(ddb_f.read(4), byteorder='little') snd_channel = int.from_bytes(ddb_f.read(2), byteorder='little') int.from_bytes(ddb_f.read(4), byteorder='little') # unknown snd_bytes = ddb_f.read(snd_length - 18) wav_params = (snd_channel, 2, snd_frame_rate, 0, 'NONE', 'NONE') # Write snd to wave file with wave.open(output_path, "wb") as wav_f: wav_f.setparams(wav_params) wav_f.writeframes(snd_bytes) print("Dumped [%s] -> %s" % (" ".join(phonemes), output_path)) snd_pos_list.append(snd_offset) if (gen_lab or gen_seg) and art_item.get("frame_align"): _, t = art_item["snd_start"].split("=") snd_vstart_offset, _ = t.split("_") snd_vstart_offset = int(snd_vstart_offset, 16) snd_empt_bytes = snd_vstart_offset - snd_offset if gen_lab: lab_content = generate_lab( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length) lab_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "lab")) with open(lab_output_path, "w") as lab_f: lab_f.write(lab_content) elif gen_seg: unvoiced_consonant_list = ddi_model.phdc_data["phoneme"]["unvoiced"] trans_content, seg_content, art_seg_content = generate_seg_files( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length, unvoiced_consonant_list ) trans_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "trans")) seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "seg")) art_seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "as0")) with open(trans_output_path, "w") as fp: fp.write(trans_content) with open(seg_output_path, "w") as fp: fp.write(seg_content) with open(art_seg_output_path, "w") as fp: fp.write(art_seg_content) # Dump stationary for _, sta_info in ddi_model.sta_data.items(): phoneme = sta_info["phoneme"] for sta_idx, sta_item in sta_info["stap"].items(): _, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = 
snd_name.split("_") snd_offset = int(snd_offset, 16) pitch = sta_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( [phoneme], filename_style, snd_offset, pitch, dst_path, "wav")) # real_snd_offset = 0x3d
#!/usr/bin/env python3 from __future__ import annotations start_encode = 'SND '.encode() wav_params = (1, 2, 44100, 0, 'NONE', 'NONE') window_size = 512 class ArticulationSegmentInfo(TypedDict): phonemes: list[str, str] boundaries: list[list[str, float, float]] def escape_xsampa(xsampa: str) -> str: """Escapes xsampa to file name.""" xsampa = xsampa.replace("Sil", "sil") # Sil is a special case xsampa = ( xsampa.replace("\\", "-") .replace("/", "~") .replace("?", "!") .replace(":", ";") .replace("<", "(") .replace(">", ")") ) return xsampa def unescape_xsampa(xsampa: str) -> str: """Unescapes xsampa from file name.""" xsampa = ( xsampa.replace("-", "\\") .replace("~", "/") .replace("!", "?") .replace(";", ":") .replace("(", "<") .replace(")", ">") ) return xsampa def parse_args(args: Sequence[str] = None): # : list[str] # initialize parser parser = argparse.ArgumentParser() parser.add_argument('--src_path', required=True, help='source ddi file path') parser.add_argument('--dst_path', help='destination extract path, ' 'default to be "./[name]/snd"') parser.add_argument('--gen_lab', action='store_true', help='generate lab file') parser.add_argument('--gen_seg', action='store_true', help='generate trans, seg, as files') parser.add_argument('--filename_style', type=str, choices=['flat', 'devkit'], default=None, help="output filename style, default to be 'devkit', or default to be 'flat' if gen_lab is true.") # parse args args_result = parser.parse_args(args) ddi_path: str = os.path.normpath(args_result.src_path) ddb_path: str = re.sub(r'\.ddi$', '.ddb', ddi_path) dst_path: str = args_result.dst_path if dst_path is None: dst_path = os.path.dirname(ddi_path) + '/snd' dst_path: str = os.path.normpath(dst_path) # make dirs if not os.path.exists(dst_path): os.makedirs(dst_path) gen_lab: bool = args_result.gen_lab gen_seg: bool = args_result.gen_seg filename_style: str = args_result.filename_style if filename_style is None: if gen_lab or gen_seg: filename_style = "flat" else: filename_style = "devkit" return ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg def create_file_name(phonemes: list[str], name_style: str, offset: int, pitch: float, dst_path: str, file_type: str): offset_hex = f'{offset:0>8x}' escaped_phonemes = [escape_xsampa(p) for p in phonemes] phonemes_len = len(phonemes) if pitch >= 0: pit_str = f"pit+{pitch:.2f}" else: pit_str = f"pit{pitch:.2f}" filename = "" if name_style == "flat": phonemes_str = "-".join(escaped_phonemes) prefix = "" if phonemes_len == 0: filename = f"unknown_{offset_hex}.{file_type}" else: if phonemes_len == 1: if phonemes[0] == "growl": prefix = "growl" else: prefix = "sta" elif phonemes_len == 2: prefix = "art" elif phonemes_len == 3: prefix = "tri" file_type_prefix = "lab" if file_type == "lab" else "wav" filename = f"{file_type_prefix}/{prefix}_[{phonemes_str}]_{pit_str}_{offset_hex}.{file_type}" elif name_style == "devkit": phonemes_path = "/".join([item + "#" + bytes_to_str(item.encode('utf-8')) for item in escaped_phonemes]) root_path = "" if phonemes_len == 0: filename = f"unknown/{offset_hex}.{file_type}" else: if phonemes_len == 1: if phonemes[0] == "growl": root_path = "vqm/growl" else: root_path = "stationary" elif phonemes_len == 2: root_path = "articulation" elif phonemes_len == 3: root_path = "triphoneme" filename = f"{root_path}/{phonemes_path}/{pit_str}_{offset_hex}.{file_type}" folder = os.path.dirname(filename) if folder != "": os.makedirs(os.path.join(dst_path, folder), exist_ok=True) return filename def nsample2sec(nsample: int, 
sample_rate: int) -> float: return nsample / sample_rate / 2 def frm2sec(frm: int, sample_rate: int) -> float: return frm * window_size / sample_rate / 2 def generate_lab(phonemes: list[str], frame_align: list[dict], sample_rate: int, offset_bytes: int, total_bytes: int): offset_time = nsample2sec(offset_bytes, sample_rate) * 1e7 duration_time = nsample2sec(total_bytes, sample_rate) * 1e7 lab_lines = [] if len(phonemes) == 3: # VCV center_phoneme = re.sub("^\^", "", phonemes[1]) phonemes = [phonemes[0], center_phoneme, center_phoneme, phonemes[2]] lab_lines.append(f"0 {offset_time:.0f} sil") last_time = 0 for i, phoneme in enumerate(phonemes): frame = frame_align[i] start_time = offset_time + frm2sec(frame["start"], sample_rate) * 1e7 end_time = offset_time + frm2sec(frame["end"], sample_rate) * 1e7 lab_lines.append(f'{start_time:.0f} {end_time:.0f} {phoneme}') last_time = end_time lab_lines.append(f'{last_time:.0f} {duration_time:.0f} sil') return "\n".join(lab_lines) def generate_seg_files( phonemes: list[str], frame_align: list[dict], sample_rate: int, offset_bytes: int, total_bytes: int, unvoiced_consonant_list: list[str]): offset_time = nsample2sec(offset_bytes, sample_rate) duration_time = nsample2sec(total_bytes, sample_rate) if len(phonemes) == 3: # VCV center_phoneme = re.sub("^\^", "", phonemes[1]) phonemes = [phonemes[0], center_phoneme, center_phoneme, phonemes[2]] seg_list: list[list] = [] boundaries: list[float] = [] for i, phoneme in enumerate(phonemes): start_time = offset_time + \ frm2sec(frame_align[i]["start"], sample_rate) end_time = offset_time + frm2sec(frame_align[i]["end"], sample_rate) if i == 0: boundaries.append(start_time) boundaries.append(end_time) seg_list.append([phoneme, start_time, end_time]) art_seg_info: ArticulationSegmentInfo = { "boundaries": boundaries, "phonemes": [] } if len(phonemes) == 4: # VCV art_seg_info["phonemes"] = [phonemes[0], phonemes[1], phonemes[3]] else: art_seg_info["phonemes"] = phonemes trans_content = generate_transcription(seg_list) seg_content = generate_seg(seg_list, duration_time) art_seg_content = generate_articulation_seg( art_seg_info, total_bytes, unvoiced_consonant_list) return trans_content, seg_content, art_seg_content def generate_transcription(seg_info: list[list]) -> str: content = [] phoneme_list = [] for i in range(0, len(seg_info)): phoneme_list.append(seg_info[i][0]) content.append(" ".join(phoneme_list)) trans_group = [item[0] for item in seg_info] content.append("[" + " ".join(trans_group) + "]") return "\n".join(content) def generate_seg( phoneme_list: list[list], wav_length: float ) -> str: content = [ "nPhonemes %d" % (len(phoneme_list) + 2,), # Add 2 Sil "articulationsAreStationaries = 0", "phoneme BeginTime EndTime", "===================================================", ] content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", 0, phoneme_list[0][1])) begin_time: float = 0 end_time: float = 0 for i in range(0, len(phoneme_list)): phoneme_info = phoneme_list[i] phoneme_name = phoneme_info[0] begin_time = phoneme_info[1] end_time = phoneme_info[2] content.append("%s\t\t%.6f\t\t%.6f" % (phoneme_name, begin_time, end_time)) content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", end_time, wav_length)) return "\n".join(content) + "\n" def generate_articulation_seg( art_seg_info: ArticulationSegmentInfo, wav_samples: int, unvoiced_consonant_list: list[str] ) -> str: content = [ "nphone art segmentation", "{", '\tphns: ["' + ('", "'.join(art_seg_info["phonemes"])) + '"];', "\tcut offset: 0;", "\tcut length: %d;" % 
int(math.floor(wav_samples / 2)), ] boundaries_str = [ ("%.9f" % item) for item in art_seg_info["boundaries"] ] content.append("\tboundaries: [" + ", ".join(boundaries_str) + "];") content.append("\trevised: false;") voiced_str = [] is_triphoneme = len(art_seg_info["phonemes"]) == 3 for i in range(0, len(art_seg_info["phonemes"])): phoneme = art_seg_info["phonemes"][i] is_unvoiced = phoneme in unvoiced_consonant_list or phoneme in [ "Sil", "Asp", "?", ] voiced_str.append(str(not is_unvoiced).lower()) if is_triphoneme and i == 1: # Triphoneme needs 2 flags for center phoneme voiced_str.append(str(not is_unvoiced).lower()) content.append("\tvoiced: [" + ", ".join(voiced_str) + "];") content.append("};") content.append("") return "\n".join(content) def main(): ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg = parse_args() snd_pos_list: list[int] = [] # Read DDI file print("Reading DDI...") with open(ddi_path, "rb") as f: ddi_bytes = f.read() ddi_model = DDIModel(ddi_bytes) ddi_model.read() # Extract snd files from DDB ddb_size = os.path.getsize(ddb_path) with open(ddb_path, "rb") as ddb_f: # Dump articulation art_list: list[tuple[list, dict]] = [] for idx, art_item in ddi_model.art_data.items(): if "artu" in art_item: # Triphoneme for idx, artu_item in art_item["artu"].items(): if "artp" in artu_item: for idx, artp_item in artu_item["artp"].items(): phonemes = [art_item["phoneme"], artu_item["phoneme"]] art_list.append((phonemes, artp_item)) if "artu" in artu_item: for idx, artu2_item in artu_item["artu"].items(): if "artp" in artu2_item: for idx, artp_item in artu2_item["artp"].items(): phonemes = [ art_item["phoneme"], artu_item["phoneme"], artu2_item["phoneme"]] art_list.append((phonemes, artp_item)) for art_item in art_list: phonemes = art_item[0] art_item = art_item[1] _, t = art_item["snd"].split("=") snd_offset, _ = t.split("_") snd_offset = int(snd_offset, 16) pitch = art_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "wav")) ddb_f.seek(snd_offset) snd_ident = ddb_f.read(4) if snd_ident != start_encode: print( f'Error: SND header not found for articulation [{" ".join(phonemes)}] {i}') continue # Read snd header snd_length = int.from_bytes(ddb_f.read(4), byteorder='little') snd_frame_rate = int.from_bytes(ddb_f.read(4), byteorder='little') snd_channel = int.from_bytes(ddb_f.read(2), byteorder='little') int.from_bytes(ddb_f.read(4), byteorder='little') # unknown snd_bytes = ddb_f.read(snd_length - 18) wav_params = (snd_channel, 2, snd_frame_rate, 0, 'NONE', 'NONE') # Write snd to wave file with wave.open(output_path, "wb") as wav_f: wav_f.setparams(wav_params) wav_f.writeframes(snd_bytes) print("Dumped [%s] -> %s" % (" ".join(phonemes), output_path)) snd_pos_list.append(snd_offset) if (gen_lab or gen_seg) and art_item.get("frame_align"): _, t = art_item["snd_start"].split("=") snd_vstart_offset, _ = t.split("_") snd_vstart_offset = int(snd_vstart_offset, 16) snd_empt_bytes = snd_vstart_offset - snd_offset if gen_lab: lab_content = generate_lab( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length) lab_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "lab")) with open(lab_output_path, "w") as lab_f: lab_f.write(lab_content) elif gen_seg: unvoiced_consonant_list = ddi_model.phdc_data["phoneme"]["unvoiced"] trans_content, seg_content, art_seg_content = generate_seg_files( phonemes, art_item["frame_align"], 
snd_frame_rate, snd_empt_bytes, snd_length, unvoiced_consonant_list ) trans_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "trans")) seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "seg")) art_seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "as0")) with open(trans_output_path, "w") as fp: fp.write(trans_content) with open(seg_output_path, "w") as fp: fp.write(seg_content) with open(art_seg_output_path, "w") as fp: fp.write(art_seg_content) # Dump stationary for _, sta_info in ddi_model.sta_data.items(): phoneme = sta_info["phoneme"] for sta_idx, sta_item in sta_info["stap"].items(): _, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") snd_offset = int(snd_offset, 16) pitch = sta_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( [phoneme], filename_style, snd_offset, pitch, dst_path, "wav")) # real_snd_offset = 0x3d
real_snd_offset = stream_reverse_search(
2
2023-11-20 11:37:46+00:00
16k
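The code dumped in the row above writes each SND payload pulled from the .ddb into a WAV container through Python's standard wave module, passing a (nchannels, sampwidth, framerate, nframes, comptype, compname) tuple to setparams before writeframes. A minimal standalone sketch of that stdlib pattern, with a hypothetical output path and half a second of synthetic silence standing in for the DDB bytes:

import wave

output_path = "example_output.wav"   # hypothetical; the dumped script builds its path via create_file_name()

frame_rate = 44100
snd_bytes = b"\x00\x00" * (frame_rate // 2)   # 0.5 s of 16-bit mono silence (sampwidth = 2)

# Same parameter tuple shape the script uses:
# (nchannels, sampwidth, framerate, nframes, comptype, compname)
wav_params = (1, 2, frame_rate, 0, "NONE", "NONE")

with wave.open(output_path, "wb") as wav_f:
    wav_f.setparams(wav_params)    # nframes=0 is acceptable; the frame count is fixed up for seekable files
    wav_f.writeframes(snd_bytes)

On little-endian platforms writeframes passes the PCM bytes through unchanged, which is why the dumped script can hand the .ddb payload to it directly once the header fields are known.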
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n # print('************************encoder shape',x.shape)\n\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n 
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = 
x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', 
to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n\n\n if conditioning is not None:\n if isinstance(conditioning, dict):\n if isinstance(list(conditioning.values())[0],list):\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n else:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps 
is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "Attention_AR_counter", "path": "text_super_resolution/model/VisionLAN/utils.py", "snippet": "class Attention_AR_counter():\n def __init__(self, display_string, dict_file, case_sensitive):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n self.display_string = display_string\n self.case_sensitive = case_sensitive\n self.de = cha_encdec(dict_file, case_sensitive)\n\n def clear(self):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n \n def add_iter(self, output, out_length, label_length, labels):\n self.total_samples += label_length.size()[0]\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n for i in range(0, len(prdt_texts)):\n if not self.case_sensitive:\n prdt_texts[i] = prdt_texts[i].lower()\n labels[i] = labels[i].lower()\n all_words = []\n for w in labels[i].split('|') + prdt_texts[i].split('|'):\n if w not in all_words:\n all_words.append(w)\n l_words = [all_words.index(_) for _ in labels[i].split('|')]\n p_words = [all_words.index(_) for _ in prdt_texts[i].split('|')]\n self.distance_C += ed.eval(labels[i], prdt_texts[i])\n self.distance_W += ed.eval(l_words, p_words)\n self.total_C += len(labels[i])\n self.total_W += len(l_words)\n self.correct = self.correct + 1 if labels[i] == prdt_texts[i] else self.correct\n return prdt_texts, labels\n\n def show(self):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W))\n self.clear()\n def show_test(self,best_acc, change= False):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n if (self.correct / self.total_samples) > best_acc:\n best_acc = np.copy(self.correct / self.total_samples)\n change = True\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}, best_acc: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W, best_acc))\n\n self.clear()\n return best_acc, change\n \n def convert(self, output, out_length):\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n prdt_prob = prdt_prob.cpu().unsqueeze(0)\n MAX_LEN = 25\n length = prdt_prob.size(1)\n if length >= MAX_LEN:\n return prdt_prob[:, :MAX_LEN, :], prdt_prob\n pad = torch.zeros([prdt_prob.shape[0], MAX_LEN - length, prdt_prob.shape[2]])\n prdt_prob = torch.cat([prdt_prob, pad], dim=1)\n return prdt_texts, prdt_prob" }, { "identifier": "TPSSpatialTransformer", "path": "text_super_resolution/model/tps_spatial_transformer.py", "snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, 
margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate" }, { "identifier": "STNHead", "path": "text_super_resolution/model/stn_head.py", "snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n 
self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x" }, { "identifier": "VisionLAN", "path": "text_super_resolution/model/VisionLAN/VisionLAN.py", "snippet": "class VisionLAN(nn.Module):\n '''\n Architecture of VisionLAN\n input\n input: input image\n label_pos: character index\n output\n text_pre: word-level prediction from VRM\n test_rem: remaining string prediction from MLM\n text_mas: occluded character prediction from MLM\n '''\n def __init__(self, strides, input_shape):\n super(VisionLAN, self).__init__()\n self.backbone = resnet.resnet45(strides, compress_layer=False)\n self.input_shape = input_shape\n self.MLM_VRM = MLM_VRM()\n def forward(self, input, label_pos, training_stp, Train_in = True):\n # extract features\n features = self.backbone(input)\n # MLM + VRM\n if Train_in:\n text_pre, test_rem, text_mas, mask_map = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return text_pre, test_rem, text_mas, mask_map\n else:\n output, out_length = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return output, out_length" }, { "identifier": "SemanticLoss", "path": "text_super_resolution/loss/semantic_loss.py", "snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", 
float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal" }, { "identifier": "ssim_psnr", "path": "text_super_resolution/utils/ssim_psnr.py", "snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):" } ]
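The context list above quotes extract_into_tensor, which the DDPM code further down uses (in q_sample, q_posterior and related helpers) to pick one scheduled scalar per sample and broadcast it against an image-shaped tensor. A small self-contained sketch of that gather-and-reshape pattern, with made-up shapes and schedule values:

import torch

def extract_into_tensor(a, t, x_shape):
    # Same logic as the snippet quoted in the context list above.
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

schedule = torch.linspace(0.9999, 0.02, steps=1000)  # stand-in for e.g. sqrt_alphas_cumprod
x = torch.randn(4, 3, 32, 128)                       # batch of image-shaped tensors
t = torch.randint(0, 1000, (4,))                     # one timestep index per sample

coef = extract_into_tensor(schedule, t, x.shape)     # -> shape (4, 1, 1, 1)
scaled = coef * x                                    # broadcasts over channels and spatial dims
print(coef.shape, scaled.shape)

Reshaping to (b, 1, 1, 1) rather than (b,) is what lets the same helper serve 4-D latents of any spatial size.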
import datetime
import math
import cv2
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import pygame
from collections import OrderedDict
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from torchvision import transforms
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN
from utils.render_standard_text import *
from text_super_resolution.loss.semantic_loss import SemanticLoss
from text_super_resolution.utils import ssim_psnr
from pygame import freetype
from utils.metrics import *
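Among those imports, LitEma tracks exponential moving averages of the model weights; the context snippet above warms the decay up as min(decay, (1 + n) / (10 + n)) and nudges each shadow tensor toward its live parameter. A stripped-down sketch of that update rule over a plain dict of tensors (hypothetical names, no registered buffers or name mangling):

import torch

def ema_update(shadow, params, num_updates, max_decay=0.9999):
    # Decay warm-up and shadow update, as in the LitEma snippet quoted above.
    decay = min(max_decay, (1 + num_updates) / (10 + num_updates))
    one_minus_decay = 1.0 - decay
    with torch.no_grad():
        for name, p in params.items():
            shadow[name].sub_(one_minus_decay * (shadow[name] - p))
    return decay

params = {"weight": torch.randn(8, 8, requires_grad=True)}
shadow = {k: v.detach().clone() for k, v in params.items()}   # initialise shadow from the live weights

for step in range(1, 4):
    params["weight"].data.add_(0.01 * torch.randn_like(params["weight"]))  # stand-in for an optimizer step
    decay = ema_update(shadow, params, num_updates=step)
    print(step, round(decay, 3))

Early in training the effective decay is small, so the shadow weights track the model closely; only after many updates does it approach the configured 0.9999.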
14,272
log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', }
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', }
model_VL = VisionLAN(**cfg['args'])
21
2023-11-20 06:34:21+00:00
16k
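Aside on the record above: its DDPM code registers sqrt(alphas_cumprod) and sqrt(1 - alphas_cumprod) as buffers precisely so the forward process q(x_t | x_0) can be sampled in closed form, which is what its q_sample method does. Below is a minimal standalone sketch of that same computation; the linear beta schedule, timestep count, and toy tensor shapes are illustrative assumptions, not values taken from the record.

import numpy as np
import torch

# Illustrative linear beta schedule (assumed values, not from the record).
timesteps = 1000
betas = np.linspace(1e-4, 2e-2, timesteps)
alphas_cumprod = np.cumprod(1.0 - betas, axis=0)

# Same quantities the snippet registers as 'sqrt_alphas_cumprod' and
# 'sqrt_one_minus_alphas_cumprod' buffers.
sqrt_acp = torch.tensor(np.sqrt(alphas_cumprod), dtype=torch.float32)
sqrt_one_minus_acp = torch.tensor(np.sqrt(1.0 - alphas_cumprod), dtype=torch.float32)

def q_sample(x_start, t, noise):
    # Closed-form forward diffusion: x_t = sqrt(acp_t) * x_0 + sqrt(1 - acp_t) * noise
    c1 = sqrt_acp[t].view(-1, 1, 1, 1)
    c2 = sqrt_one_minus_acp[t].view(-1, 1, 1, 1)
    return c1 * x_start + c2 * noise

x0 = torch.randn(2, 3, 32, 32)               # toy batch standing in for images
t = torch.randint(0, timesteps, (2,))        # one timestep per sample
xt = q_sample(x0, t, torch.randn_like(x0))   # noised batch, same shape as x0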
mjavadpur/mj_ONNX_SadTalker
inference_onnx.py
[ { "identifier": "AnimateFromCoeff", "path": "src/facerender/animate_onnx.py", "snippet": "class AnimateFromCoeff():\n\n def __init__(self, sadtalker_path, device):\n\n with open(sadtalker_path['facerender_yaml']) as f:\n config = yaml.safe_load(f)\n\n generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],\n **config['model_params']['common_params'])\n kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],\n **config['model_params']['common_params'])\n he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],\n **config['model_params']['common_params'])\n mapping = MappingNet(**config['model_params']['mapping_params'])\n\n generator.to(device)\n kp_extractor.to(device)\n he_estimator.to(device)\n mapping.to(device)\n for param in generator.parameters():\n param.requires_grad = False\n for param in kp_extractor.parameters():\n param.requires_grad = False \n for param in he_estimator.parameters():\n param.requires_grad = False\n for param in mapping.parameters():\n param.requires_grad = False\n\n if sadtalker_path is not None:\n if 'checkpoint' in sadtalker_path: # use safe tensor\n self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)\n else:\n self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)\n else:\n raise AttributeError(\"Checkpoint should be specified for video head pose estimator.\")\n\n if sadtalker_path['mappingnet_checkpoint'] is not None:\n self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)\n else:\n raise AttributeError(\"Checkpoint should be specified for video head pose estimator.\") \n\n self.kp_extractor = kp_extractor\n self.generator = generator\n self.he_estimator = he_estimator\n self.mapping = mapping\n\n self.kp_extractor.eval()\n self.generator.eval()\n self.he_estimator.eval()\n self.mapping.eval()\n \n self.device = device\n \n def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None, \n kp_detector=None, he_estimator=None, \n device=\"cpu\"):\n\n checkpoint = safetensors.torch.load_file(checkpoint_path)\n\n if generator is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'generator' in k:\n x_generator[k.replace('generator.', '')] = v\n generator.load_state_dict(x_generator)\n if kp_detector is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'kp_extractor' in k:\n x_generator[k.replace('kp_extractor.', '')] = v\n kp_detector.load_state_dict(x_generator)\n if he_estimator is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'he_estimator' in k:\n x_generator[k.replace('he_estimator.', '')] = v\n he_estimator.load_state_dict(x_generator)\n \n return None\n\n def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None, \n kp_detector=None, he_estimator=None, optimizer_generator=None, \n optimizer_discriminator=None, optimizer_kp_detector=None, \n optimizer_he_estimator=None, device=\"cpu\"):\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n if generator is not None:\n generator.load_state_dict(checkpoint['generator'])\n if kp_detector is not None:\n kp_detector.load_state_dict(checkpoint['kp_detector'])\n if he_estimator is not None:\n he_estimator.load_state_dict(checkpoint['he_estimator'])\n if discriminator is not None:\n try:\n 
discriminator.load_state_dict(checkpoint['discriminator'])\n except:\n print ('No discriminator in the state-dict. Dicriminator will be randomly initialized')\n if optimizer_generator is not None:\n optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])\n if optimizer_discriminator is not None:\n try:\n optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])\n except RuntimeError as e:\n print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')\n if optimizer_kp_detector is not None:\n optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])\n if optimizer_he_estimator is not None:\n optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])\n\n return checkpoint['epoch']\n \n def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,\n optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n if mapping is not None:\n mapping.load_state_dict(checkpoint['mapping'])\n if discriminator is not None:\n discriminator.load_state_dict(checkpoint['discriminator'])\n if optimizer_mapping is not None:\n optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])\n if optimizer_discriminator is not None:\n optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])\n\n return checkpoint['epoch']\n\n def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):\n\n source_image=x['source_image'].type(torch.FloatTensor)\n source_semantics=x['source_semantics'].type(torch.FloatTensor)\n target_semantics=x['target_semantics_list'].type(torch.FloatTensor) \n source_image=source_image.to(self.device)\n source_semantics=source_semantics.to(self.device)\n target_semantics=target_semantics.to(self.device)\n if 'yaw_c_seq' in x:\n yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)\n yaw_c_seq = x['yaw_c_seq'].to(self.device)\n else:\n yaw_c_seq = None\n if 'pitch_c_seq' in x:\n pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)\n pitch_c_seq = x['pitch_c_seq'].to(self.device)\n else:\n pitch_c_seq = None\n if 'roll_c_seq' in x:\n roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) \n roll_c_seq = x['roll_c_seq'].to(self.device)\n else:\n roll_c_seq = None\n\n frame_num = x['frame_num']\n\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor, self.he_estimator, self.mapping, \n yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)\n\n predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])\n predictions_video = predictions_video[:frame_num]\n\n video = []\n for idx in range(predictions_video.shape[0]):\n image = predictions_video[idx]\n image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n\n ### the generated video is 256x256, so we keep the aspect ratio, \n original_size = crop_info[0]\n if original_size:\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n \n video_name = x['video_name'] + '.mp4'\n path = os.path.join(video_save_dir, 'temp_'+video_name)\n \n imageio.mimsave(path, result, fps=float(25))\n\n av_path = os.path.join(video_save_dir, video_name)\n return_path = av_path \n \n audio_path = x['audio_path'] \n audio_name = 
os.path.splitext(os.path.split(audio_path)[-1])[0]\n new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')\n start_time = 0\n # cog will not keep the .mp3 filename\n sound = AudioSegment.from_file(audio_path)\n frames = frame_num \n end_time = start_time + frames*1/25*1000\n word1=sound.set_frame_rate(16000)\n word = word1[start_time:end_time]\n word.export(new_audio_path, format=\"wav\")\n\n save_video_with_watermark(path, new_audio_path, av_path, watermark= False)\n print(f'The generated video is named {video_save_dir}/{video_name}') \n\n if 'full' in preprocess.lower():\n # only add watermark to the full image.\n video_name_full = x['video_name'] + '_full.mp4'\n full_video_path = os.path.join(video_save_dir, video_name_full)\n return_path = full_video_path\n paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n print(f'The generated video is named {video_save_dir}/{video_name_full}') \n else:\n full_video_path = av_path \n\n #### paste back then enhancers\n if enhancer:\n video_name_enhancer = x['video_name'] + '_enhanced.mp4'\n enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)\n av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) \n return_path = av_path_enhancer\n\n try:\n enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n except:\n enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n \n save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)\n print(f'The generated video is named {video_save_dir}/{video_name_enhancer}')\n os.remove(enhanced_path)\n\n os.remove(path)\n os.remove(new_audio_path)\n\n return return_path\n \n def generate_deploy(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):\n # Create Talking Face\n # 1. Reading Data\n source_image=x['source_image'].type(torch.FloatTensor)\n source_semantics=x['source_semantics'].type(torch.FloatTensor)\n target_semantics=x['target_semantics_list'].type(torch.FloatTensor) \n source_image=source_image.to(self.device)\n source_semantics=source_semantics.to(self.device)\n target_semantics=target_semantics.to(self.device)\n # 2. برای محاسبه به دستگاه self.device انتقال دهید\n if 'yaw_c_seq' in x:\n yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)\n yaw_c_seq = x['yaw_c_seq'].to(self.device)\n else:\n yaw_c_seq = None\n if 'pitch_c_seq' in x:\n pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)\n pitch_c_seq = x['pitch_c_seq'].to(self.device)\n else:\n pitch_c_seq = None\n if 'roll_c_seq' in x:\n roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) \n roll_c_seq = x['roll_c_seq'].to(self.device)\n else:\n roll_c_seq = None\n\n frame_num = x['frame_num']\n # 3. پیش‌بینی‌های مدل مولد برای ویدیوهای Talking Face\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor, self.he_estimator, self.mapping, \n yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)\n # 4. تنظیم شکل و برش\n predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])\n predictions_video = predictions_video[:frame_num]\n # 5. 
هر فریم ویدیو را پیمایش کنید و آن را به Numpy تبدیل کنید و در نتیجه ذخیره کنید.\n video = []\n for idx in range(predictions_video.shape[0]):\n image = predictions_video[idx]\n image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n # 6. اندازه تصویر در نتیجه را متناسب با اطلاعات اندازه اصلی در crop_info تغییر دهید.\n original_size = crop_info[0]\n if original_size:\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n\n # 7. از کتابخانه imageio برای ذخیره نتیجه به عنوان یک فایل ویدیویی با نرخ فریم 25 استفاده کنید.\n video_name = x['video_name'] + '.mp4'\n path = os.path.join(video_save_dir, 'temp_'+video_name)\n \n imageio.mimsave(path, result, fps=float(25))\n\n av_path = os.path.join(video_save_dir, video_name)\n return_path = av_path \n \n # 8. مسیر صوتی را در پارامتر x وارد کنید و یک مسیر فایل صوتی جدید ایجاد کنید.\n audio_path = x['audio_path'] \n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')\n start_time = 0\n sound = AudioSegment.from_file(audio_path)\n frames = frame_num \n end_time = start_time + frames*1/25*1000\n word1=sound.set_frame_rate(16000)\n word = word1[start_time:end_time]\n word.export(new_audio_path, format=\"wav\")\n \n\n save_video_with_watermark(path, new_audio_path, av_path, watermark= False)\n print(f' ---- The first generated video is named {video_save_dir}/{video_name}') \n \n if 'full' in preprocess.lower():\n # only add watermark to the full image.\n video_name_full = x['video_name'] + '_full.mp4'\n full_video_path = os.path.join(video_save_dir, video_name_full)\n return_path = full_video_path\n paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n print(f' ---- The full generated video is named {video_save_dir}/{video_name_full}') \n else:\n full_video_path = av_path \n \n if enhancer:\n video_name_enhancer = x['video_name'] + '_enhanced.mp4'\n enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)\n av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) \n return_path = av_path_enhancer\n\n print(\" ---- video_name_enhancer: \" + video_name_enhancer + \"\\n ---- enhanced_path: \" + enhanced_path + \"\\n ---- av_path_enhancer: \" + av_path_enhancer + \"\\n ---- return_path: \" + return_path)\n\n try:\n enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n print(\" -- len of enhanced_images_gen_with_len -- \" + str(len(enhanced_images_gen_with_len)))\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n print(\"\\n -------- try execute enhanced_path ---\" + enhanced_path + \"\\n ---- path:\" + path+ \"\\n ---- full_video_path:\" + full_video_path)\n except:\n enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n print(\" -- len of enhanced_images_gen_with_len -- \" + str(len(enhanced_images_gen_with_len)))\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n print(\"\\n -------- except execute enhanced_path ---\" + enhanced_path+ \"\\n ---- path:\" + path+ \"\\n ---- full_video_path:\" + full_video_path)\n \n save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)\n print(f' ---- The enhance 
generated video is named {video_save_dir}/{video_name_enhancer}')\n # os.remove(enhanced_path)\n\n # حالت فول تصویر پس‌بازگشت\n # paste_pic(av_path_enhancer, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n # print(f'The final enhancer generated video is named {full_video_path}') \n # return_path = full_video_path\n \n # os.remove(path)\n # os.remove(new_audio_path)\n print(f' ---- Final return_path: {return_path}')\n\n return return_path" }, { "identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):\n\n syncnet_mel_step_size = 16\n fps = 25\n\n pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]\n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n\n \n if idlemode:\n num_frames = int(length_of_audio * 25)\n indiv_mels = np.zeros((num_frames, 80, 16))\n else:\n wav = audio.load_wav(audio_path, 16000) \n wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)\n wav = crop_pad_audio(wav, wav_length)\n orig_mel = audio.melspectrogram(wav).T\n spec = orig_mel.copy() # nframes 80\n indiv_mels = []\n\n for i in tqdm(range(num_frames), 'mel:'):\n start_frame_num = i-2\n start_idx = int(80. * (start_frame_num / float(fps)))\n end_idx = start_idx + syncnet_mel_step_size\n seq = list(range(start_idx, end_idx))\n seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]\n m = spec[seq, :]\n indiv_mels.append(m.T)\n indiv_mels = np.asarray(indiv_mels) # T 80 16\n\n ratio = generate_blink_seq_randomly(num_frames) # T\n source_semantics_path = first_coeff_path\n source_semantics_dict = scio.loadmat(source_semantics_path)\n ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)\n\n if ref_eyeblink_coeff_path is not None:\n ratio[:num_frames] = 0\n refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)\n refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]\n refeyeblink_num_frames = refeyeblink_coeff.shape[0]\n if refeyeblink_num_frames<num_frames:\n div = num_frames//refeyeblink_num_frames\n re = num_frames%refeyeblink_num_frames\n refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]\n refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])\n refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)\n print(refeyeblink_coeff.shape[0])\n\n ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64] \n \n indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16\n\n if use_blink:\n ratio = torch.FloatTensor(ratio).unsqueeze(0) # bs T\n else:\n ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.) 
\n # bs T\n ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0) # bs 1 70\n\n indiv_mels = indiv_mels.to(device)\n ratio = ratio.to(device)\n ref_coeff = ref_coeff.to(device)\n\n return {'indiv_mels': indiv_mels, \n 'ref': ref_coeff, \n 'num_frames': num_frames, \n 'ratio_gt': ratio,\n 'audio_name': audio_name, 'pic_name': pic_name}" }, { "identifier": "get_facerender_data", "path": "src/generate_facerender_batch.py", "snippet": "def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, \n batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, \n expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):\n\n semantic_radius = 13\n video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]\n txt_path = os.path.splitext(coeff_path)[0]\n\n data={}\n\n img1 = Image.open(pic_path)\n source_image = np.array(img1)\n source_image = img_as_float32(source_image)\n source_image = transform.resize(source_image, (size, size, 3))\n source_image = source_image.transpose((2, 0, 1))\n source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)\n source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)\n data['source_image'] = source_image_ts\n \n source_semantics_dict = scio.loadmat(first_coeff_path)\n generated_dict = scio.loadmat(coeff_path)\n\n if 'full' not in preprocess.lower():\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n else:\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)\n source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)\n source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)\n data['source_semantics'] = source_semantics_ts\n\n # target \n generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale\n\n if 'full' in preprocess.lower():\n generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)\n\n if still_mode:\n generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)\n\n with open(txt_path+'.txt', 'w') as f:\n for coeff in generated_3dmm:\n for i in coeff:\n f.write(str(i)[:7] + ' '+'\\t')\n f.write('\\n')\n\n target_semantics_list = [] \n frame_num = generated_3dmm.shape[0]\n data['frame_num'] = frame_num\n for frame_idx in range(frame_num):\n target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)\n target_semantics_list.append(target_semantics)\n\n remainder = frame_num%batch_size\n if remainder!=0:\n for _ in range(batch_size-remainder):\n target_semantics_list.append(target_semantics)\n\n target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1\n target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])\n data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)\n data['video_name'] = video_name\n data['audio_path'] = audio_path\n \n if input_yaw_list is not None:\n yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)\n data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)\n if input_pitch_list is not None:\n pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)\n data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)\n if input_roll_list is not None:\n 
roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) \n data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)\n \n return data" }, { "identifier": "init_path", "path": "src/utils/init_path.py", "snippet": "def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'):\n\n if old_version:\n #### load all the checkpoint of `pth`\n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n use_safetensor = False\n elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))):\n print('using safetensor as default')\n sadtalker_paths = {\n \"checkpoint\":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'),\n }\n use_safetensor = True\n else:\n print(\"WARNING: The new version of the model will be updated by safetensor, you may need to download it mannully. We run the old version of the checkpoint this time!\")\n use_safetensor = False\n \n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting'\n sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml')\n sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml')\n sadtalker_paths['use_safetensor'] = use_safetensor # os.path.join(config_dir, 'auido2exp.yaml')\n\n if 'full' in preprocess:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml')\n else:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml')\n\n return sadtalker_paths" }, { "identifier": "CropAndExtract", "path": "src/utils/preprocess.py", "snippet": "class CropAndExtract():\n def __init__(self, sadtalker_path, device):\n\n self.propress = Preprocesser(device)\n self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device)\n \n if sadtalker_path['use_safetensor']:\n checkpoint = safetensors.torch.load_file(sadtalker_path['checkpoint']) \n self.net_recon.load_state_dict(load_x_from_safetensor(checkpoint, 'face_3drecon'))\n else:\n checkpoint = torch.load(sadtalker_path['path_of_net_recon_model'], map_location=torch.device(device)) \n self.net_recon.load_state_dict(checkpoint['net_recon'])\n\n self.net_recon.eval()\n self.lm3d_std = load_lm3d(sadtalker_path['dir_of_BFM_fitting'])\n self.device = device\n \n def generate(self, input_path, save_dir, crop_or_resize='crop', source_image_flag=False, pic_size=256):\n\n pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] \n\n landmarks_path = os.path.join(save_dir, 
pic_name+'_landmarks.txt') \n coeff_path = os.path.join(save_dir, pic_name+'.mat') \n png_path = os.path.join(save_dir, pic_name+'.png') \n\n #load input\n if not os.path.isfile(input_path):\n raise ValueError('input_path must be a valid path to video/image file')\n elif input_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:\n # loader for first frame\n full_frames = [cv2.imread(input_path)]\n fps = 25\n else:\n # loader for videos\n video_stream = cv2.VideoCapture(input_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n full_frames = [] \n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break \n full_frames.append(frame) \n if source_image_flag:\n break\n\n x_full_frames= [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames] \n\n #### crop images as the \n if 'crop' in crop_or_resize.lower(): # default crop\n x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)\n clx, cly, crx, cry = crop\n lx, ly, rx, ry = quad\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)\n elif 'full' in crop_or_resize.lower():\n x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)\n clx, cly, crx, cry = crop\n lx, ly, rx, ry = quad\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)\n else: # resize mode\n oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1] \n crop_info = ((ox2 - ox1, oy2 - oy1), None, None)\n\n frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames]\n if len(frames_pil) == 0:\n print('No face is detected in the input file')\n return None, None\n\n # save crop info\n for frame in frames_pil:\n cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))\n\n # 2. get the landmark according to the detected face. 
\n if not os.path.isfile(landmarks_path): \n lm = self.propress.predictor.extract_keypoint(frames_pil, landmarks_path)\n else:\n print(' Using saved landmarks.')\n lm = np.loadtxt(landmarks_path).astype(np.float32)\n lm = lm.reshape([len(x_full_frames), -1, 2])\n\n if not os.path.isfile(coeff_path):\n # load 3dmm paramter generator from Deep3DFaceRecon_pytorch \n video_coeffs, full_coeffs = [], []\n for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'):\n frame = frames_pil[idx]\n W,H = frame.size\n lm1 = lm[idx].reshape([-1, 2])\n \n if np.mean(lm1) == -1:\n lm1 = (self.lm3d_std[:, :2]+1)/2.\n lm1 = np.concatenate(\n [lm1[:, :1]*W, lm1[:, 1:2]*H], 1\n )\n else:\n lm1[:, -1] = H - 1 - lm1[:, -1]\n\n trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std)\n \n trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32)\n im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0)\n \n with torch.no_grad():\n full_coeff = self.net_recon(im_t)\n coeffs = split_coeff(full_coeff)\n\n pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs}\n \n pred_coeff = np.concatenate([\n pred_coeff['exp'], \n pred_coeff['angle'],\n pred_coeff['trans'],\n trans_params[2:][None],\n ], 1)\n video_coeffs.append(pred_coeff)\n full_coeffs.append(full_coeff.cpu().numpy())\n\n semantic_npy = np.array(video_coeffs)[:,0] \n\n savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]})\n\n return coeff_path, png_path, crop_info" }, { "identifier": "Audio2Coeff", "path": "src/test_audio2coeff.py", "snippet": "class Audio2Coeff():\n\n def __init__(self, sadtalker_path, device):\n #load config\n fcfg_pose = open(sadtalker_path['audio2pose_yaml_path'])\n cfg_pose = CN.load_cfg(fcfg_pose)\n cfg_pose.freeze()\n fcfg_exp = open(sadtalker_path['audio2exp_yaml_path'])\n cfg_exp = CN.load_cfg(fcfg_exp)\n cfg_exp.freeze()\n\n # load audio2pose_model\n self.audio2pose_model = Audio2Pose(cfg_pose, None, device=device)\n self.audio2pose_model = self.audio2pose_model.to(device)\n self.audio2pose_model.eval()\n for param in self.audio2pose_model.parameters():\n param.requires_grad = False \n \n try:\n if sadtalker_path['use_safetensor']:\n checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint'])\n self.audio2pose_model.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2pose'))\n else:\n load_cpk(sadtalker_path['audio2pose_checkpoint'], model=self.audio2pose_model, device=device)\n except:\n raise Exception(\"Failed in loading audio2pose_checkpoint\")\n\n # load audio2exp_model\n netG = SimpleWrapperV2()\n netG = netG.to(device)\n for param in netG.parameters():\n netG.requires_grad = False\n netG.eval()\n try:\n if sadtalker_path['use_safetensor']:\n checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint'])\n netG.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2exp'))\n else:\n load_cpk(sadtalker_path['audio2exp_checkpoint'], model=netG, device=device)\n except:\n raise Exception(\"Failed in loading audio2exp_checkpoint\")\n self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False)\n self.audio2exp_model = self.audio2exp_model.to(device)\n for param in self.audio2exp_model.parameters():\n param.requires_grad = False\n self.audio2exp_model.eval()\n \n self.device = device\n\n def generate(self, batch, coeff_save_dir, pose_style, ref_pose_coeff_path=None):\n\n with torch.no_grad():\n #test\n results_dict_exp= 
self.audio2exp_model.test(batch)\n exp_pred = results_dict_exp['exp_coeff_pred'] #bs T 64\n\n #for class_id in range(1):\n #class_id = 0#(i+10)%45\n #class_id = random.randint(0,46) #46 styles can be selected \n batch['class'] = torch.LongTensor([pose_style]).to(self.device)\n results_dict_pose = self.audio2pose_model.test(batch) \n pose_pred = results_dict_pose['pose_pred'] #bs T 6\n\n pose_len = pose_pred.shape[1]\n if pose_len<13: \n pose_len = int((pose_len-1)/2)*2+1\n pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), pose_len, 2, axis=1)).to(self.device)\n else:\n pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device) \n \n coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1) #bs T 70\n\n coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy() \n\n if ref_pose_coeff_path is not None: \n coeffs_pred_numpy = self.using_refpose(coeffs_pred_numpy, ref_pose_coeff_path)\n \n savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])), \n {'coeff_3dmm': coeffs_pred_numpy})\n\n return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name']))\n \n def using_refpose(self, coeffs_pred_numpy, ref_pose_coeff_path):\n num_frames = coeffs_pred_numpy.shape[0]\n refpose_coeff_dict = loadmat(ref_pose_coeff_path)\n refpose_coeff = refpose_coeff_dict['coeff_3dmm'][:,64:70]\n refpose_num_frames = refpose_coeff.shape[0]\n if refpose_num_frames<num_frames:\n div = num_frames//refpose_num_frames\n re = num_frames%refpose_num_frames\n refpose_coeff_list = [refpose_coeff for i in range(div)]\n refpose_coeff_list.append(refpose_coeff[:re, :])\n refpose_coeff = np.concatenate(refpose_coeff_list, axis=0)\n\n #### relative head pose\n coeffs_pred_numpy[:, 64:70] = coeffs_pred_numpy[:, 64:70] + ( refpose_coeff[:num_frames, :] - refpose_coeff[0:1, :] )\n return coeffs_pred_numpy" }, { "identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):\n\n syncnet_mel_step_size = 16\n fps = 25\n\n pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]\n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n\n \n if idlemode:\n num_frames = int(length_of_audio * 25)\n indiv_mels = np.zeros((num_frames, 80, 16))\n else:\n wav = audio.load_wav(audio_path, 16000) \n wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)\n wav = crop_pad_audio(wav, wav_length)\n orig_mel = audio.melspectrogram(wav).T\n spec = orig_mel.copy() # nframes 80\n indiv_mels = []\n\n for i in tqdm(range(num_frames), 'mel:'):\n start_frame_num = i-2\n start_idx = int(80. 
* (start_frame_num / float(fps)))\n end_idx = start_idx + syncnet_mel_step_size\n seq = list(range(start_idx, end_idx))\n seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]\n m = spec[seq, :]\n indiv_mels.append(m.T)\n indiv_mels = np.asarray(indiv_mels) # T 80 16\n\n ratio = generate_blink_seq_randomly(num_frames) # T\n source_semantics_path = first_coeff_path\n source_semantics_dict = scio.loadmat(source_semantics_path)\n ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)\n\n if ref_eyeblink_coeff_path is not None:\n ratio[:num_frames] = 0\n refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)\n refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]\n refeyeblink_num_frames = refeyeblink_coeff.shape[0]\n if refeyeblink_num_frames<num_frames:\n div = num_frames//refeyeblink_num_frames\n re = num_frames%refeyeblink_num_frames\n refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]\n refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])\n refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)\n print(refeyeblink_coeff.shape[0])\n\n ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64] \n \n indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16\n\n if use_blink:\n ratio = torch.FloatTensor(ratio).unsqueeze(0) # bs T\n else:\n ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.) \n # bs T\n ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0) # bs 1 70\n\n indiv_mels = indiv_mels.to(device)\n ratio = ratio.to(device)\n ref_coeff = ref_coeff.to(device)\n\n return {'indiv_mels': indiv_mels, \n 'ref': ref_coeff, \n 'num_frames': num_frames, \n 'ratio_gt': ratio,\n 'audio_name': audio_name, 'pic_name': pic_name}" }, { "identifier": "get_facerender_data", "path": "src/generate_facerender_batch.py", "snippet": "def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, \n batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, \n expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):\n\n semantic_radius = 13\n video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]\n txt_path = os.path.splitext(coeff_path)[0]\n\n data={}\n\n img1 = Image.open(pic_path)\n source_image = np.array(img1)\n source_image = img_as_float32(source_image)\n source_image = transform.resize(source_image, (size, size, 3))\n source_image = source_image.transpose((2, 0, 1))\n source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)\n source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)\n data['source_image'] = source_image_ts\n \n source_semantics_dict = scio.loadmat(first_coeff_path)\n generated_dict = scio.loadmat(coeff_path)\n\n if 'full' not in preprocess.lower():\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n else:\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)\n source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)\n source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)\n data['source_semantics'] = source_semantics_ts\n\n # target \n generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale\n\n if 'full' in preprocess.lower():\n generated_3dmm = np.concatenate([generated_3dmm, 
np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)\n\n if still_mode:\n generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)\n\n with open(txt_path+'.txt', 'w') as f:\n for coeff in generated_3dmm:\n for i in coeff:\n f.write(str(i)[:7] + ' '+'\\t')\n f.write('\\n')\n\n target_semantics_list = [] \n frame_num = generated_3dmm.shape[0]\n data['frame_num'] = frame_num\n for frame_idx in range(frame_num):\n target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)\n target_semantics_list.append(target_semantics)\n\n remainder = frame_num%batch_size\n if remainder!=0:\n for _ in range(batch_size-remainder):\n target_semantics_list.append(target_semantics)\n\n target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1\n target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])\n data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)\n data['video_name'] = video_name\n data['audio_path'] = audio_path\n \n if input_yaw_list is not None:\n yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)\n data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)\n if input_pitch_list is not None:\n pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)\n data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)\n if input_roll_list is not None:\n roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) \n data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)\n \n return data" }, { "identifier": "init_path", "path": "src/utils/init_path.py", "snippet": "def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'):\n\n if old_version:\n #### load all the checkpoint of `pth`\n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n use_safetensor = False\n elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))):\n print('using safetensor as default')\n sadtalker_paths = {\n \"checkpoint\":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'),\n }\n use_safetensor = True\n else:\n print(\"WARNING: The new version of the model will be updated by safetensor, you may need to download it mannully. 
We run the old version of the checkpoint this time!\")\n use_safetensor = False\n \n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting'\n sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml')\n sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml')\n sadtalker_paths['use_safetensor'] = use_safetensor # os.path.join(config_dir, 'auido2exp.yaml')\n\n if 'full' in preprocess:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml')\n else:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml')\n\n return sadtalker_paths" } ]
from glob import glob from time import strftime from argparse import ArgumentParser from src.facerender.animate_onnx import AnimateFromCoeff from src.generate_batch import get_data from src.generate_facerender_batch import get_facerender_data from src.utils.init_path import init_path from src.utils.preprocess import CropAndExtract from src.test_audio2coeff import Audio2Coeff from src.generate_batch import get_data from src.generate_facerender_batch import get_facerender_data from src.utils.init_path import init_path from src.face3d.visualize import gen_composed_video import shutil import torch import os, sys, time import base64
12,717
# from src.facerender.animate import AnimateFromCoeff def main(args): #torch.backends.cudnn.enabled = False # tts_service = os.getenv("TTS_SERVER") facerender_batch_size = 10 startInference = time.time() pic_path = args.source_image audio_path = args.driven_audio save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S")) os.makedirs(save_dir, exist_ok=True) pose_style = args.pose_style device = args.device batch_size = args.batch_size input_yaw_list = args.input_yaw input_pitch_list = args.input_pitch input_roll_list = args.input_roll ref_eyeblink = args.ref_eyeblink ref_pose = args.ref_pose current_root_path = os.path.split(sys.argv[0])[0] sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess) #init model preprocess_model = CropAndExtract(sadtalker_paths, device)
# from src.facerender.animate import AnimateFromCoeff def main(args): #torch.backends.cudnn.enabled = False # tts_service = os.getenv("TTS_SERVER") facerender_batch_size = 10 startInference = time.time() pic_path = args.source_image audio_path = args.driven_audio save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S")) os.makedirs(save_dir, exist_ok=True) pose_style = args.pose_style device = args.device batch_size = args.batch_size input_yaw_list = args.input_yaw input_pitch_list = args.input_pitch input_roll_list = args.input_roll ref_eyeblink = args.ref_eyeblink ref_pose = args.ref_pose current_root_path = os.path.split(sys.argv[0])[0] sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess) #init model preprocess_model = CropAndExtract(sadtalker_paths, device)
audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
5
2023-11-25 06:53:12+00:00
16k
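Aside on the record above: the get_data snippet in its context aligns audio with 25 fps video by slicing, for every video frame, a 16-step window from an 80-bin mel spectrogram (assuming roughly 80 mel frames per second) and clamping the indices at the spectrogram boundaries. A self-contained sketch of just that indexing logic follows; the spectrogram is random toy data and num_frames is an assumed placeholder rather than a value from the record.

import numpy as np

fps = 25
syncnet_mel_step_size = 16
num_frames = 100                       # assumed number of video frames
orig_mel = np.random.rand(400, 80)     # toy stand-in for audio.melspectrogram(wav).T

indiv_mels = []
for i in range(num_frames):
    start_frame_num = i - 2                                  # window is offset two frames back
    start_idx = int(80.0 * (start_frame_num / float(fps)))   # ~80 mel frames per second
    seq = [min(max(idx, 0), orig_mel.shape[0] - 1)           # clamp at the spectrogram edges
           for idx in range(start_idx, start_idx + syncnet_mel_step_size)]
    indiv_mels.append(orig_mel[seq, :].T)                    # one (80, 16) chunk per video frame

indiv_mels = np.asarray(indiv_mels)                          # shape: (num_frames, 80, 16)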
microsoft/Project-BayesDAG
src/causica/preprocessing/data_processor.py
[ { "identifier": "CausalDataset", "path": "src/causica/datasets/dataset.py", "snippet": "class CausalDataset(Dataset):\n \"\"\"\n Class to store the np.ndarray adjacency matrix and samples\n from the intervention distributions as attributes of the Dataset object.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]],\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None,\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n self._counterfactual_data = counterfactual_data\n self._intervention_data = intervention_data\n self._adjacency_data = adjacency_data\n self._subgraph_data = subgraph_data\n\n def get_adjacency_data_matrix(self) -> np.ndarray:\n \"\"\"\n Return the np.ndarray dag adjacency matrix.\n \"\"\"\n if self._adjacency_data is None:\n raise TypeError(\"Adjacency matrix is None. No adjacency matrix has been loaded.\")\n return self._adjacency_data\n\n def set_adjacency_data_matrix(self, A: np.ndarray) -> None:\n \"\"\"\n Externally set the np.ndarray dag adjacency matrix. If already set with a matrix, it will overwrite it\n \"\"\"\n self._adjacency_data = A.copy()\n\n @property\n def has_adjacency_data_matrix(self) -> bool:\n \"\"\"\n Returns: If the adjacency matrix is loaded\n \"\"\"\n return self._adjacency_data is not None\n\n def get_known_subgraph_mask_matrix(self) -> np.ndarray:\n \"\"\"\n Return the np.ndarray dag mask matrix.\n \"\"\"\n if self._subgraph_data is None:\n raise TypeError(\"Adjacency matrix is None. No adjacency matrix has been loaded.\")\n return self._subgraph_data\n\n def get_intervention_data(self) -> List[InterventionData]:\n \"\"\"\n Return the list of interventions and samples from intervened distributions\n \"\"\"\n if self._intervention_data is None:\n raise TypeError(\"Intervention data is None. No intervention data has been loaded.\")\n return self._intervention_data\n\n def get_counterfactual_data(self) -> List[InterventionData]:\n \"\"\"\n Return the list of interventions and samples for the counterfactual data\n \"\"\"\n if self._counterfactual_data is None:\n raise TypeError(\"Counterfactual data is None. 
No counterfactual data has been loaded.\")\n return self._counterfactual_data\n\n @property\n def has_counterfactual_data(self) -> bool:\n \"\"\"\n Returns True if object has counterfactual data.\n \"\"\"\n return self._counterfactual_data is not None" }, { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n # Ensure that data and masks are immutable\n if not issparse(self._train_data):\n self._train_data.setflags(write=False)\n self._train_mask.setflags(write=False)\n if test_data is not None and not issparse(test_data):\n self._test_data = cast(np.ndarray, test_data)\n self._test_data.setflags(write=False)\n self._test_mask = cast(np.ndarray, test_mask)\n self._test_mask.setflags(write=False)\n\n if val_data is not None and not issparse(val_data):\n self._val_data = cast(np.ndarray, val_data)\n self._val_mask = cast(np.ndarray, val_mask)\n self._val_data.setflags(write=False)\n self._val_mask.setflags(write=False)\n\n def to_causal(\n self,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]] = None,\n ):\n \"\"\"\n Return the dag version of this dataset.\n \"\"\"\n return CausalDataset(\n train_data=self._train_data,\n train_mask=self._train_mask,\n adjacency_data=adjacency_data,\n subgraph_data=subgraph_data,\n intervention_data=intervention_data,\n counterfactual_data=counterfactual_data,\n val_data=self._val_data,\n val_mask=self._val_mask,\n test_data=self._test_data,\n test_mask=self._test_mask,\n variables=self._variables,\n data_split=self._data_split,\n held_out_interventions=self._held_out_interventions,\n true_posterior=self._true_posterior,\n graph_args=self._graph_args\n )\n\n @property\n def train_data_and_mask(self) -> Tuple[np.ndarray, np.ndarray]:\n # Add to avoid inconsistent type mypy error\n return self._train_data, self._train_mask" }, { "identifier": "SparseDataset", "path": "src/causica/datasets/dataset.py", "snippet": "class SparseDataset(BaseDataset):\n \"\"\"\n Class to store sparse train/val/test data and masks and variables metadata.\n \"\"\"\n\n def __init__(\n self,\n train_data: csr_matrix,\n train_mask: csr_matrix,\n val_data: Optional[csr_matrix] = None,\n val_mask: Optional[csr_matrix] = None,\n test_data: Optional[csr_matrix] = None,\n test_mask: Optional[csr_matrix] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split)\n # Declare types to avoid mypy error\n self._val_data: Optional[csr_matrix]\n 
self._val_mask: Optional[csr_matrix]\n self._test_data: Optional[csr_matrix]\n self._test_mask: Optional[csr_matrix]\n self._train_data: csr_matrix\n self._train_mask: csr_matrix\n\n def to_dense(self) -> Dataset:\n \"\"\"\n Return the dense version of this dataset, i.e. all sparse data and mask arrays are transformed to dense.\n \"\"\"\n val_data = self._val_data.toarray() if self._val_data is not None else None\n val_mask = self._val_mask.toarray() if self._val_mask is not None else None\n test_data = self._test_data.toarray() if self._test_data is not None else None\n test_mask = self._test_mask.toarray() if self._test_mask is not None else None\n return Dataset(\n self._train_data.toarray(),\n self._train_mask.toarray(),\n val_data,\n val_mask,\n test_data,\n test_mask,\n self._variables,\n self._data_split,\n )" }, { "identifier": "InterventionData", "path": "src/causica/datasets/intervention_data.py", "snippet": "class InterventionData(NamedTuple):\n \"\"\"Class that acts as a container for observational (rank-1), interventional (rank-2) or counterfactual (rank-3) data.\n\n This data object can be serialized by converting to a dict, taking the form\n\n {\n \"intervention_idxs\": Optional[np.ndarray]\n \"intervention_values\": Optional[np.ndarray]\n \"test_data\": np.ndarray\n \"conditioning_idxs\": Optional[np.ndarray] = None\n \"conditioning_values\": Optional[np.ndarray] = None\n \"effect_idxs\": Optional[np.ndarray] = None\n \"intervention_reference\": Optional[np.ndarray] = None\n \"reference_data\": Optional[np.ndarray] = None\n },\n\n Args:\n conditioning_idxs: np.ndarray. 1d array containing the indices of each variable on which we condition on. For counterfactuals,\n all variables should be conditioned on.\n conditioning_values: np.ndarray. 1d array containing the values being assigned to the conditioned variables.\n effect_idxs: np.ndarray. 1d array containing the indices of each variable for which we want to evaluate the effect of the treatment.\n intervention_idxs: np.ndarray. 1d array containing the indices of each variable on which an intervention is made.\n intervention_values: np.ndarray. 1d array containing the values being assigned to the intervened variables.\n intervention_reference: np.ndarray 1d array containing reference values for interventions.\n test_data: np.ndarray. Samples from intervened distribution.\n reference_data: np.ndarray. 
Samples from intervened distribution with reference intervention.\n \"\"\"\n\n intervention_idxs: Optional[np.ndarray]\n intervention_values: Optional[np.ndarray]\n test_data: np.ndarray\n conditioning_idxs: Optional[np.ndarray] = None\n conditioning_values: Optional[np.ndarray] = None\n effect_idxs: Optional[np.ndarray] = None\n intervention_reference: Optional[np.ndarray] = None\n reference_data: Optional[np.ndarray] = None\n\n def to_dict(self):\n # When converting to dict, numpy arrays are converted to lists\n result = self._asdict()\n for k, v in result.items():\n if v is not None:\n result[k] = v.tolist()\n return result\n\n @classmethod\n def from_dict(cls, input_dict):\n type_converted_input = {k: np.atleast_1d(v) if v is not None else None for k, v in input_dict.items()}\n return cls(**type_converted_input)" }, { "identifier": "Variables", "path": "src/causica/datasets/variables.py", "snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of 
lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, 
auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. 
This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data" }, { "identifier": "IdentityTransform", "path": "src/causica/preprocessing/transforms.py", "snippet": "class IdentityTransform(FunctionTransformer):\n \"\"\"Scikit-learn data transformation passing through any data without modification.\"\"\"\n\n def __init__(self):\n super().__init__(func=self.identity, inverse_func=self.identity)\n\n @staticmethod\n def identity(values: np.ndarray) -> np.ndarray:\n \"\"\"Return values without modification.\"\"\"\n return values" }, { "identifier": "UnitScaler", "path": "src/causica/preprocessing/transforms.py", "snippet": "class UnitScaler(FunctionTransformer):\n \"\"\"Scikit-learn data transformation for scaling (or squashing) data to the unit hypercube.\n\n The range of the data is determined by the provided variables.\n \"\"\"\n\n def __init__(self, variables: Iterable[Variable]):\n \"\"\"\n Args:\n variables: Iterable over the variables expected to be transformed\n provided in the same order as data columns.\n \"\"\"\n # Collect limits for the variables\n lower = []\n upper = []\n for variable in variables:\n lower.append(variable.lower)\n upper.append(variable.upper)\n\n if variable.lower == variable.upper:\n warnings.warn(\n f\"Variable with name '{variable.name}' has the same upper and lower values. 
Is this variable a constant?\"\n )\n\n self._lower = np.array(lower)\n self._range: np.ndarray = np.array(upper) - self._lower\n super().__init__(func=self.scale, inverse_func=self.unscale)\n\n def scale(self, values: np.ndarray) -> np.ndarray:\n \"\"\"Scale values into the hypercube using pre-determined variable ranges.\"\"\"\n return (values - self._lower) / self._range\n\n def unscale(self, scaled_values: np.ndarray) -> np.ndarray:\n \"\"\"Restore scaled values from the hypercube into the original range.\"\"\"\n return scaled_values * self._range + self._lower" } ]
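The snippets above show how causica's dataset containers compose: a plain Dataset holds train data, masks and Variables metadata, and to_causal promotes it to a CausalDataset by attaching an adjacency matrix plus InterventionData samples. The following minimal sketch illustrates that relationship using only the constructor signatures quoted above; the top-level import path (causica.datasets.*), the toy arrays and the 3-node DAG are assumptions made for illustration, not values taken from this sample.

import numpy as np
from causica.datasets.dataset import Dataset            # assumed installed package path
from causica.datasets.intervention_data import InterventionData
from causica.datasets.variables import Variables

# Toy observational data: 100 rows of 3 fully observed continuous columns.
train_data = np.random.randn(100, 3)
train_mask = np.ones_like(train_data)

# Infer per-column metadata (type, lower/upper bounds, ...) from the data and mask.
variables = Variables.create_from_data_and_dict(train_data, train_mask)

dataset = Dataset(train_data, train_mask, variables=variables)

# One intervention do(x0 = 1.0) together with samples from the intervened distribution.
intervention = InterventionData(
    intervention_idxs=np.array([0]),
    intervention_values=np.array([1.0]),
    test_data=np.random.randn(50, 3),
)

# Attach an assumed ground-truth DAG (x0 -> x1 -> x2) and the interventional samples.
adjacency = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
causal_dataset = dataset.to_causal(
    adjacency_data=adjacency,
    subgraph_data=None,
    intervention_data=[intervention],
)
assert causal_dataset.has_adjacency_data_matrix
print(len(causal_dataset.get_intervention_data()))      # 1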
import logging import warnings import numpy as np import torch from typing import Iterable, List, Optional, Tuple, TypeVar, Union from scipy import sparse from scipy.sparse import csr_matrix, issparse from sklearn.exceptions import NotFittedError from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.utils.validation import check_is_fitted from tqdm import tqdm from ..datasets.dataset import CausalDataset, Dataset, SparseDataset from ..datasets.intervention_data import InterventionData from ..datasets.variables import Variables from .transforms import IdentityTransform, UnitScaler
12549
self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
EPSILON = 1e-5 logger = logging.getLogger(__name__) V = TypeVar("V", np.ndarray, torch.Tensor) # pylint: disable=protected-access class DataProcessor: def __init__( self, variables: Variables, unit_scale_continuous: bool = True, standardize_data_mean: bool = False, standardize_data_std: bool = False, ): """ Args: variables (Variables): Information about variables/features used by this model. unit_scale_continuous (bool): Scale continuous variables to the range of [0, 1]. standardize_data_mean (bool): Standardize continuous variables to mean=0 standardize_data_std (bool): Standardize continuous variables to std=1 """ if unit_scale_continuous and (standardize_data_mean or standardize_data_std): raise ValueError("Cannot unit scale and standardize variables simultanously.") self._variables = variables # Call unprocessed columns unproc_cols, processed columns proc_cols unproc_cols_by_type = self._variables.unprocessed_cols_by_type proc_cols_by_type = self._variables.processed_cols_by_type def flatten(lists): # Flatten proc_cols for continuous and binary unproc_cols, since they will be of form [[1], [2], ...] return [i for sublist in lists for i in sublist] if "binary" in unproc_cols_by_type: self._bin_unproc_cols = unproc_cols_by_type["binary"] self._bin_proc_cols = flatten(proc_cols_by_type["binary"]) # Save contiguous regions containig binary features to allow for more efficient processing via slicing self._bin_unproc_regions = self.split_contiguous_sublists(self._bin_unproc_cols) self._bin_proc_regions = self.split_contiguous_sublists(self._bin_proc_cols) assert len(self._bin_unproc_regions) == len(self._bin_proc_regions) else: self._bin_unproc_cols, self._bin_proc_cols = [], [] if "continuous" in unproc_cols_by_type: self._cts_unproc_cols = unproc_cols_by_type["continuous"] self._cts_proc_cols = flatten(proc_cols_by_type["continuous"]) # Save contiguous regions containing continuous features to allow for more efficient processing via slicing if all(x.overwrite_processed_dim is None for x in self._variables): self._cts_unproc_regions = self.split_contiguous_sublists(self._cts_unproc_cols) self._cts_proc_regions = self.split_contiguous_sublists(self._cts_proc_cols) else: # For VAEM, we can only take single variable as region # to allow for processing/reverting mask self._cts_unproc_regions = [[col_id] for col_id in unproc_cols_by_type["continuous"]] self._cts_proc_regions = proc_cols_by_type["continuous"] assert len(self._cts_unproc_regions) == len(self._cts_proc_regions) if unit_scale_continuous: self._cts_normalizers = [ UnitScaler(variables[i] for i in unproc_region) for unproc_region in self._cts_unproc_regions ] elif standardize_data_mean or standardize_data_std: self._cts_normalizers = [ StandardScaler(with_mean=standardize_data_mean, with_std=standardize_data_std) for _ in self._cts_unproc_regions ] else: self._cts_normalizers = [IdentityTransform()] * len(self._cts_unproc_regions) else: self._cts_unproc_cols, self._cts_proc_cols, self._cts_normalizers = [], [], [] if "categorical" in unproc_cols_by_type: self._cat_unproc_cols = unproc_cols_by_type["categorical"] self._cat_proc_cols = flatten(proc_cols_by_type["categorical"]) self._cat_proc_cols_grouped = proc_cols_by_type["categorical"] def get_lower(idx): return self._variables[idx].lower def get_upper(idx): return self._variables[idx].upper var_categories = [ np.arange(int(get_lower(var_idx)), int(get_upper(var_idx)) + 1) for var_idx in self._cat_unproc_cols ] self._one_hot_encoder = OneHotEncoder(categories=var_categories, 
sparse=False, handle_unknown="ignore") # Fit on dummy data due to an issue in sklearn where the encoder needs to be fitted to data even if the # categories are specified upon creation. self._one_hot_encoder.fit(np.array([categories[0] for categories in var_categories]).reshape(1, -1)) else: self._cat_unproc_cols, self._cat_proc_cols = [], [] self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
self, dataset: Union[Dataset, CausalDataset, SparseDataset]
1
2023-11-21 12:55:08+00:00
16k
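The sample above crops the source file immediately after "def process_dataset(" and records "self, dataset: Union[Dataset, CausalDataset, SparseDataset]" as the reference continuation. A hypothetical scorer for comparing a model's predicted next line against that reference, not something provided by the dataset itself, could look like this:

import difflib

def next_line_similarity(prediction: str, reference: str) -> float:
    """Return a 0-1 similarity ratio between a predicted and a reference next line."""
    return difflib.SequenceMatcher(None, prediction.strip(), reference.strip()).ratio()

reference = "self, dataset: Union[Dataset, CausalDataset, SparseDataset]"
print(next_line_similarity("self, dataset: Union[Dataset, CausalDataset, SparseDataset]", reference))  # 1.0
print(round(next_line_similarity("self, dataset: Dataset", reference), 2))                             # partial credit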
ChenyangGao/python-epub3
epub3/epub.py
[ { "identifier": "File", "path": "epub3/util/file.py", "snippet": "class File:\n __slots__ = (\"path\", \"fs\", \"open\", \"open_modes\", \"_getattr\")\n ALL_MODES = frozenset(\"rwxab+\")\n\n def __init__(\n self, \n /, \n path=None, \n fs=None, \n open_modes=None, \n ):\n super().__setattr__(\"path\", path)\n super().__setattr__(\"fs\", fs)\n self._init_open(path, fs, open_modes)\n\n def __init_subclass__(cls, /, **kwargs):\n raise TypeError(\"subclassing is not allowed\")\n\n def __repr__(self, /) -> str:\n cls = type(self)\n module = cls.__module__\n name = cls.__qualname__\n if module != \"__main__\":\n name = module + \".\" + name\n return \"%s(%s)\" % (name, \", \".join(\"%s=%r\" % (k, getattr(self, k)) for k in cls.__slots__))\n\n def __delattr__(self, attr):\n raise TypeError(\"can't delete any attributes\")\n\n def __getattr__(self, attr, /):\n try:\n return self._getattr(attr)\n except Exception as e:\n raise AttributeError(attr) from e\n\n def __setattr__(self, attr, value, /):\n raise TypeError(\"can't set any attributes\")\n\n def _init_open(self, path, fs, open_modes, /):\n cls = type(self)\n code, file_open = cls._get_open(fs)\n use_io_open = file_open is io.open\n if file_open is None:\n if isinstance(path, Path):\n file_open = path.open\n use_io_open = True\n code = 0\n else:\n code, file_open = cls._get_open(path)\n if file_open is None:\n if not isinstance(path, (bytes, str, PathLike)):\n raise TypeError(\"unable to determine how to open the file\")\n file_open = partial(io.open, path)\n use_io_open = True\n if code < 0:\n code = 0\n use_fs = False\n else:\n file_open = partial(file_open, path)\n use_fs = True\n if code == 0:\n def _getattr0(attr):\n try:\n return getattr(os, attr)\n except AttributeError:\n try:\n return getattr(ospath, attr)\n except AttributeError:\n return getattr(shutil, attr)\n elif code == 1:\n _getattr0 = partial(getattr, fs if use_fs else path)\n elif code == 2:\n _getattr0 = (fs if use_fs else path).__getitem__\n if use_fs:\n def _getattr(attr, default=undefined, /):\n try:\n val = _getattr0(attr)\n except (LookupError, AttributeError):\n if default is undefined:\n raise\n return default\n if not callable(val):\n return val\n if isclass(val) or isinstance(val, staticmethod):\n return val\n return partial(val, path)\n else:\n def _getattr(attr, default=undefined, /):\n try:\n return _getattr0(attr)\n except (LookupError, AttributeError):\n if default is undefined:\n raise\n return default\n default_open_modes = _getattr(\"open_modes\", None)\n if default_open_modes is not None:\n open_modes = default_open_modes\n super().__setattr__(\"_getattr\", _getattr)\n open_keywords = cls._open_keywords(file_open)\n if \"mode\" not in open_keywords or open_modes == \"\":\n open_modes = frozenset()\n elif open_modes is None:\n open_modes = type(self).ALL_MODES\n elif use_io_open:\n open_modes = frozenset(open_modes) & type(self).ALL_MODES | frozenset(\"rb\")\n else:\n open_modes = frozenset(open_modes) & type(self).ALL_MODES | frozenset(\"r\")\n super().__setattr__(\"open_modes\", open_modes)\n amode = frozenset(\"rwxa+\")\n def open(\n mode=\"r\", \n buffering=-1, \n encoding=None, \n errors=None, \n newline=None, \n **kwargs, \n ):\n if mode not in OPEN_MODES:\n raise ValueError(f\"invalid open mode: {mode!r}\")\n binary_mode = \"b\" in mode\n if mode == \"r\":\n pass\n elif not open_modes:\n if \"r\" not in mode or \"+\" in mode:\n raise ValueError(f\"open mode unsupported: {mode!r}\")\n mode = \"r\"\n else:\n if open_modes:\n if amode & set(mode) - 
open_modes:\n raise ValueError(f\"open mode unsupported: {mode!r}\")\n mode = next(m for m in \"rwax\" if m in mode) + \"+\"[:\"+\" in mode]\n if open_modes:\n if \"b\" in open_modes:\n mode += \"b\"\n if open_keywords is not CONTAINS_ALL:\n kwargs = {k: v for k, v in kwargs.items() if k in open_keywords}\n if open_modes:\n kwargs[\"mode\"] = mode\n if \"buffering\" in open_keywords:\n kwargs[\"buffering\"] = buffering\n file = file_open(**kwargs)\n else:\n file = file_open(**kwargs)\n if binary_mode and buffering == 0:\n return file\n bufsize = buffering if buffering > 1 else DEFAULT_BUFFER_SIZE\n if \"+\" in mode:\n file = BufferedRandom(file, bufsize)\n elif \"r\" in mode:\n file = BufferedReader(file, bufsize)\n else:\n file = BufferedWriter(file, bufsize)\n if binary_mode:\n return file\n return TextIOWrapper(\n file, \n encoding=encoding, \n errors=errors, \n newline=newline, \n line_buffering=buffering==1, \n )\n super().__setattr__(\"open\", open)\n\n @staticmethod\n def _get_open(f, /):\n if f is None:\n return 0, None\n if callable(open := getattr(f, \"open\", None)):\n return 1, open\n try:\n if callable(open := f[\"open\"]):\n return 2, open\n except (TypeError, LookupError):\n if callable(f):\n return 3, f\n return -1, None\n\n @staticmethod\n def _open_keywords(open, /):\n params = signature(open).parameters\n if params:\n names = []\n for name, param in reversed(params.items()):\n if param.kind not in (POSITIONAL_OR_KEYWORD, KEYWORD_ONLY):\n break\n names.append(name)\n if param.kind is VAR_KEYWORD:\n return CONTAINS_ALL\n return frozenset(names)\n return frozenset()\n\n def check_open_mode(self, mode=\"r\", /):\n if mode not in OPEN_MODES:\n return False\n if mode == \"r\":\n return True\n open_modes = self.open_modes\n if not open_modes:\n if \"r\" not in mode or \"+\" in mode:\n return False\n else:\n if open_modes and frozenset(\"rwxa+\") & set(mode) - open_modes:\n return False\n return True" }, { "identifier": "RootFS", "path": "epub3/util/file.py", "snippet": "class RootFS:\n\n def __init__(self, root=None, /, joinpath=None):\n none_root = root is None\n if not none_root and callable(open := getattr(root, \"open\", None)):\n _getattr = partial(getattr, root)\n elif not none_root and callable(open := root[\"open\"]):\n _getattr = root.__getitem__\n elif none_root or isinstance(root, (bytes, str, PathLike)):\n self._fs = None\n if root is None:\n self._root = os.getcwd()\n else:\n self._root = ospath.realpath(root)\n if ospath.isfile(root):\n raise NotADirectoryError(errno.ENOTDIR, root)\n self._joinpath = ospath.join\n self._open = io.open\n return\n if joinpath is None:\n joinpath = get_any_callable(_getattr, \"joinpath\", \"join\") or posixpath.join\n self._fs = root\n self._root = \"\"\n self._getattr = _getattr\n self._joinpath = joinpath\n self._open = open\n\n def __repr__(self, /):\n return f\"<{type(self).__qualname__}({self._root!r}) at {hex(id(self))}>\"\n\n def _getattr(self, attr, /):\n try:\n return getattr(os, attr)\n except AttributeError:\n try:\n return getattr(ospath, attr)\n except AttributeError:\n return getattr(shutil, attr)\n\n def __getattr__(self, attr, /):\n try:\n val = self._getattr(attr)\n except (AttributeError, LookupError) as e:\n raise AttributeError(attr) from e\n if not callable(val):\n return val\n if isclass(val) or isinstance(val, staticmethod):\n return val\n def wrapper(name, /, *args, **kwargs):\n return val(self.joinpath(name), *args, **kwargs)\n return update_wrapper(wrapper, val)\n\n @property\n def name(self, /):\n return 
self._root\n\n @property\n def root(self, /):\n return self._root\n\n def joinpath(self, /, *paths):\n return self._joinpath(self._root, *paths)\n\n def open(\n self, \n name, \n /, \n mode='r', \n buffering=-1, \n encoding=None, \n errors=None, \n newline=None, \n ):\n return self._open(\n self.joinpath(name), \n mode=mode, \n buffering=buffering, \n encoding=encoding, \n errors=errors, \n newline=newline, \n )" }, { "identifier": "TemporaryFS", "path": "epub3/util/file.py", "snippet": "class TemporaryFS(RootFS):\n\n def __init__(self, root=None, /, joinpath=None):\n none_root = root is None\n if not none_root and callable(open := getattr(root, \"open\", None)):\n _getattr = partial(getattr, root)\n elif not none_root and callable(open := root[\"open\"]):\n _getattr = root.__getitem__\n elif none_root or isinstance(root, (bytes, str, PathLike)):\n self._fs = None\n temdir = TemporaryDirectory(dir=root)\n self._root = temdir.name\n self._joinpath = ospath.join\n self._open = io.open\n self._cleanup = temdir.cleanup\n return\n else:\n raise TypeError(f\"can't get `open` method from: {fs!r}\")\n if joinpath is None:\n joinpath = get_any_callable(_getattr, \"joinpath\", \"join\") or posixpath.join\n self._fs = root\n self._root = root = \"\"\n self._getattr = _getattr\n self._joinpath = joinpath\n self.open = open\n remove = get_any_callable(_getattr, \"remove\", \"rm\")\n if remove is None:\n warn(f\"can't get `remove` and `rm` methods from: {fs!r}\")\n self.remove = lambda *args, **kwargs: None\n self._cleanup = lambda: None\n return\n self.remove = remove\n mkdir = get_any_callable(_getattr, \"mkdir\", \"makedir\")\n if mkdir is not None:\n name = str(uuid4())\n try:\n mkdir(name)\n except:\n warn(f\"can't make temporary directory: {name!r} on {fs!r}\")\n else:\n self._root = root = name\n if root:\n rmtree = get_any_callable(_getattr, \"rmtree\", \"removetree\")\n if rmtree is not None:\n def _open(path, *args, **kwargs):\n return open(joinpath(root, path), *args, **kwargs)\n self.open = update_wrapper(_open, open)\n def _remove(path):\n remove(joinpath(root, path))\n self.remove = update_wrapper(_remove, remove)\n self._cleanup = lambda: rmtree(root)\n return\n created = set()\n def _open(path, mode=\"r\", **kwargs):\n path = joinpath(root, path)\n file = open(path, mode=mode, **kwargs)\n if \"r\" not in mode:\n created.add(path)\n return file\n self.open = update_wrapper(_open, open)\n def _remove(path):\n path = joinpath(root, path)\n remove(path)\n created.discard(path)\n self.remove = update_wrapper(_remove, remove)\n rmdir = get_any_callable(_getattr, \"rmdir\", \"removedir\")\n def _cleanup():\n for path in tuple(created):\n try:\n remove(path)\n except:\n pass\n if root and rmdir is not None:\n try:\n rmdir(root)\n except:\n pass\n self._cleanup = _cleanup\n\n def __repr__(self, /):\n return f\"<{type(self).__qualname__}({self._fs!r}) {self._root!r} at {hex(id(self))}>\"\n\n def __del__(self, /):\n self.cleanup()\n\n def __enter__(self, /):\n return self\n\n def __exit__(self, exc, value, tb, /):\n self.cleanup()\n\n def cleanup(self, /):\n try:\n self._cleanup()\n except:\n pass" }, { "identifier": "OPEN_MODES", "path": "epub3/util/file.py", "snippet": "OPEN_MODES = frozenset(\n \"\".join(t1) \n for t0 in product(\"rwax\", (\"\", \"b\", \"t\"), (\"\", \"+\")) \n for t1 in permutations(t0, 3)\n)" }, { "identifier": "guess_media_type", "path": "epub3/util/helper.py", "snippet": "def guess_media_type(name: str, /, default: str = \"application/octet-stream\") -> str:\n return 
guess_type(name)[0] or default" }, { "identifier": "values", "path": "epub3/util/helper.py", "snippet": "def values(m, /):\n if isinstance(m, Mapping):\n try:\n return m.values()\n except Exception:\n return ValuesView(m)\n return m" }, { "identifier": "items", "path": "epub3/util/helper.py", "snippet": "def items(m, /):\n if isinstance(m, Mapping):\n try:\n return m.items()\n except Exception:\n return ItemsView(m)\n return m" }, { "identifier": "sup", "path": "epub3/util/helper.py", "snippet": "def sup(exists, x=1):\n \"\"\"Find the smallest available integer greater than or equal to `x`.\n\n :param exists: Determine if the value exists (unavailable), return True if it does.\n :param x: Start value.\n\n :return: The smallest integer greater than or equal to the initial value \n x for which calling exists returns False.\n \"\"\"\n δ = 1\n while exists(x):\n x += δ\n δ <<= 1\n if δ <= 2:\n return x\n δ >>= 2\n x -= δ\n while δ > 1:\n δ >>= 1\n if exists(x):\n x += δ\n else:\n x -= δ\n return x + exists(x)" }, { "identifier": "proxy_property", "path": "epub3/util/proxy.py", "snippet": "@overload\ndef proxy_property(fget: None, /, key: Optional[str] = \"\") -> Callable[[Callable], property]: ..." }, { "identifier": "ElementAttribProxy", "path": "epub3/util/proxy.py", "snippet": "class ElementAttribProxy(metaclass=CachedMeta):\n __const_keys__: tuple[str, ...] = ()\n __protected_keys__: tuple[str, ...] = ()\n __cache_check_key__ = lambda obj: isinstance(obj, Element)\n __cache_cls__ = WeakKeyDictionary if USE_BUILTIN_XML else WeakValueDictionary\n __wrap_class__: \"type[ElementAttribProxy]\"\n\n def __init__(self, root, /):\n self._root = root\n self._attrib = root.attrib\n if USE_BUILTIN_XML:\n self._nsmap = nsmap = {}\n else:\n self._nsmap = nsmap = root.nsmap\n if self.__const_keys__:\n self.__const_keys__ = frozenset(\n resolve_prefix(key, nsmap, NAMESPACES) for key in type(self).__const_keys__\n )\n if self.__protected_keys__:\n self.__protected_keys__ = frozenset(\n resolve_prefix(key, nsmap, NAMESPACES) for key in type(self).__protected_keys__\n )\n\n def __init_subclass__(\n cls, \n /, \n get_key=None, \n check_key=None, \n get_state=None, \n set_state=None, \n **kwargs, \n ):\n if callable(get_key):\n self.__cache_get_key__ = get_key\n if isclass(check_key) and issubclass(check_key, object) or type(check_key) is tuple:\n self.__cache_check_key__ = lambda obj, _t: isinstance(obj, _t)\n elif type(check_key) in (set, frozenset):\n self.__cache_check_key__ = check_key.__contains__\n elif callable(check_key):\n self.__cache_check_key__ = check_key\n if callable(get_state):\n self.__cache_get_state__ = get_state\n if callable(set_state):\n self.__cache_set_state__ = set_state\n namespaces = cls.__dict__\n const_keys = namespaces.get(\"__const_keys__\")\n if const_keys:\n for key in const_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key))\n protected_keys = namespaces.get(\"__protected_keys__\")\n if protected_keys:\n for key in protected_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key, setable=True))\n optional_keys = namespaces.get(\"__optional_keys__\")\n if optional_keys:\n for key in optional_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key, setable=True, delable=True))\n if \"__wrap_class__\" not in namespaces:\n for base_cls in cls.__mro__:\n if \"__wrap_class__\" in 
base_cls.__dict__:\n cls.__wrap_class__ = base_cls.__wrap_class__\n break\n elif cls.__dict__.get(\"__is_wrap_class__\"):\n cls.__wrap_class__ = base_cls\n break\n\n def __contains__(self, key, /):\n if not isinstance(key, str) or not key:\n return False\n return resolve_prefix(key, self._nsmap, NAMESPACES) in self._attrib\n\n def __delitem__(self, key, /):\n if isinstance(key, (int, slice)):\n del self._root[key]\n elif isinstance(key, str):\n if not key:\n raise ValueError(\"empty key not allowed\")\n if key in self.__const_keys__ or key in self.__protected_keys__:\n raise LookupError(f\"not allowed to delete key: {key}\")\n del self._attrib[key]\n else:\n raise TypeError(\"only accept `key` type: int, slice and str\")\n return self\n\n def __eq__(self, other, /):\n if type(self) is not type(other):\n return NotImplemented\n return self._root is other._root\n\n def __getitem__(self, key, /):\n if isinstance(key, str):\n if not key:\n raise ValueError(\"empty key not allowed\")\n return self._attrib[resolve_prefix(key, self._nsmap, NAMESPACES)]\n elif isinstance(key, (int, slice)):\n if isinstance(key, int):\n return type(self).wrap(self._root[key])\n return list(map(type(self).wrap, self._root[key]))\n else:\n raise TypeError(\"only accept `key` type: int, slice and str\")\n\n def __hash__(self, /):\n return hash(self._root)\n\n @PyLinq.streamify\n def __iter__(self, /):\n return iter(self._attrib)\n\n def __len__(self, /):\n return len(self._attrib)\n\n def __setitem__(self, key, value, /):\n if not isinstance(key, str):\n raise TypeError(\"only accept `key` type: `str`\")\n if not key:\n raise ValueError(\"empty key not allowed\")\n if value is None:\n self.pop(key, None)\n else:\n if key in self.__const_keys__:\n raise LookupError(f\"not allowed to set key: {key!r}\")\n self._attrib[key] = str(value)\n return self\n\n def __repr__(self, /):\n attrib = self._attrib\n attrib = f\", {attrib=!r}\" if attrib else \"\"\n return f\"<{type(self).__qualname__}(<{self._root.tag}>{attrib}) at {hex(id(self))}>\"\n\n @classmethod\n def wrap(cls, root, /):\n wrap_class_map = cls.__dict__.get(\"__wrap_class_map__\")\n if not wrap_class_map:\n return cls.__wrap_class__(root)\n for pred, wrap_class in wrap_class_map.items():\n if isinstance(pred, str):\n if pred.startswith(\"{*}\"):\n if pred[3:] == root.tag or root.tag.endswith(pred[2:]):\n return wrap_class(root)\n elif pred.startswith(\"{}\"):\n if pred[2:] == root.tag:\n return wrap_class(root)\n elif pred.endswith(\":*\"):\n if root.tag.startswith(pred[:-1]) or root.tag.startswith(resolve_prefix(pred[:-1], NAMESPACES)):\n return wrap_class(root)\n elif root.tag == pred or root.tag == resolve_prefix(pred, NAMESPACES):\n return wrap_class(root)\n elif isinstance(pred, Pattern):\n if pred.search(root.tag) is not None:\n return wrap_class(root)\n elif isinstance(pred, Container):\n if root.tag in pred:\n return wrap_class(root)\n elif callable(pred):\n if pred(root):\n return wrap_class(root)\n return cls.__wrap_class__(root)\n\n def getproxy(self, key, /):\n if not key:\n return\n key = resolve_prefix(key, self._nsmap, NAMESPACES)\n namespaces = type(self).__dict__\n const_keys = namespaces.get(\"__const_keys__\")\n protected_keys = namespaces.get(\"__protected_keys__\")\n setable = not (const_keys and key in const_keys)\n delable = setable and not (protected_keys and key in protected_keys)\n return auto_property(key, setable=setable, delable=delable).fget(self)\n\n @cached_property\n def attrib(self, /):\n return AttrInfoProxy(self)\n\n @property\n 
def nsmap(self, /):\n return self._nsmap\n\n @cached_property\n def info(self, /):\n return MappingProxyType({\"attrib\": self.attrib})\n\n @property\n def proxy(self, /):\n return self\n\n @PyLinq.streamify\n def iter(self, /):\n return map(type(self).wrap, self._root.iterfind(\"*\"))\n\n def list(self, /, mapfn=None):\n if mapfn is None:\n return list(self.iter())\n return list(map(mapfn, self.iter()))\n\n def keys(self, /):\n return self._attrib.keys()\n\n def values(self, /):\n return self._attrib.values()\n\n def items(self, /):\n return self._attrib.items()\n\n def clear(self, /):\n const_keys = self.__const_keys__\n protected_keys = self.__protected_keys__\n attrib = self._attrib\n if const_keys or protected_keys:\n for key in tuple(attrib):\n if key in const_keys or key in protected_keys:\n continue\n del attrib[key]\n else:\n attrib.clear()\n return self\n\n def get(self, key, /, default=None):\n try:\n return self._attrib[key]\n except LookupError:\n return default\n\n def pop(self, key, /, default=undefined):\n if key in self.__const_keys__ or key in self.__protected_keys__:\n raise LookupError(f\"not allowed to delete key: {key}\") \n try:\n r = self._attrib[key]\n except LookupError:\n if default is undefined:\n raise\n return default\n else:\n del self._attrib[key]\n return r\n\n def popitem(self, /):\n const_keys = self.__const_keys__\n protected_keys = self.__protected_keys__\n for key, val in reversed(self._attrib.items()):\n if not (key in const_keys or key in protected_keys):\n del self._attrib[key]\n return (key, val)\n raise LookupError(\"no items to pop\")\n\n def setdefault(self, key, /, default=\"\"):\n if not isinstance(key, str):\n raise TypeError(\"only accept `key` type: str\")\n try:\n return seself._attriblf[key]\n except LookupError:\n self._attrib[key] = default\n return default\n\n def sort(self, key=id, reverse=False, use_backend_element=False):\n if use_backend_element:\n self._root[:] = sorted(self._root, key=key, reverse=reverse)\n else:\n self._root[:] = (e._root for e in sorted(self.iter(), key=key, reverse=reverse))\n return self\n\n def merge(self, attrib=None, /, **attrs):\n if attrib:\n if attrs:\n attrib = dict(attrib, **attrs)\n else:\n attrib = attrs\n if attrib:\n el_set(self._root, attrib=attrib, namespaces=NAMESPACES, merge=True)\n return self\n\n def update(self, attrib=None, /, **attrs):\n const_keys = self.__const_keys__\n if attrib:\n if attrs:\n attrib = dict(attrib, **attrs)\n elif const_keys and (not isinstance(attrib, Mapping) or any(key in attrib for key in const_keys)):\n attrib = dict(attrib)\n else:\n const_keys = ()\n else:\n attrib = attrs\n if const_keys:\n for key in const_keys:\n attrib.pop(key, None)\n if attrib:\n el_set(self._root, attrib=attrib, namespaces=NAMESPACES, merge=False)\n return self" }, { "identifier": "ElementProxy", "path": "epub3/util/proxy.py", "snippet": "class ElementProxy(ElementAttribProxy):\n __is_wrap_class__ = True\n\n def __repr__(self, /):\n attrib = self._attrib\n attrib = f\", {attrib=!r}\" if attrib else \"\"\n text = self.text\n text = f\", {text=!r}\" if text and text.strip() else \"\"\n tail = self.tail\n tail = f\", {tail=!r}\" if tail and tail.strip() else \"\"\n return f\"<{type(self).__qualname__}(<{self._root.tag}>{attrib}{text}{tail}) at {hex(id(self))}>\"\n\n def getproxy(self, key=\"\", /):\n if not key:\n return auto_property(key, setable=True, delable=True).fget(self)\n return super().getproxy(key)\n\n @property\n def length(self, /):\n return len(self._root)\n\n @property\n def 
tag(self, /):\n return self._root.tag\n\n @property\n def text(self, /):\n return self._root.text\n\n @text.setter\n def text(self, text, /):\n self._root.text = None if text is None else str(text)\n\n @property\n def tail(self, /):\n return self._root.tail\n\n @tail.setter\n def tail(self, text, /):\n self._root.tail = None if text is None else str(text)\n\n @cached_property\n def info(self, /):\n return ElementInfoProxy(self)\n\n def clear(self, /):\n self._root.clear()\n return self\n\n def merge(self, attrib=None, /, text=None, tail=None, **attrs):\n super().merge(attrib, **attrs)\n el_set(self._root, text=text, tail=tail, namespaces=NAMESPACES, merge=True)\n return self\n\n def update(self, attrib=None, /, text=None, tail=None, **attrs):\n super().update(attrib, **attrs)\n el_set(self._root, text=text, tail=tail, namespaces=NAMESPACES, merge=False)\n return self\n\n def add(self, name, /, attrib=None, text=None, tail=None):\n return type(self).wrap(el_add(self._root, name=name, attrib=attrib, text=text, tail=tail, namespaces=NAMESPACES))\n\n def delete(self, path, /):\n if isinstance(path, ElementAttribProxy):\n try:\n self._root.remove(path._root)\n except:\n pass\n else:\n el_del(self._root, path, namespaces=NAMESPACES)\n return self\n\n def find(self, path, /):\n return next(self.iterfind(path), None)\n\n @PyLinq.streamify\n def iterfind(self, path, /):\n return map(type(self).wrap, el_iterfind(self._root, path, NAMESPACES))\n\n def set(\n self, \n path=None, \n /, \n name=None, \n attrib=None, \n text=None, \n tail=None, \n merge=False, \n ):\n el = el_set(\n self._root, \n path, \n name=name, \n attrib=attrib, \n text=text, \n tail=tail, \n namespaces=NAMESPACES, \n merge=merge, \n )\n if el is not None:\n return type(self).wrap(el)\n\n def setfind(\n self, \n name, \n /, \n find_attrib=None, \n attrib=None, \n text=None, \n tail=None, \n merge=False, \n delete=False, \n auto_add=False, \n ):\n el = el_setfind(\n self._root, \n name=name, \n find_attrib=find_attrib, \n attrib=attrib, \n text=text, \n tail=tail, \n namespaces=NAMESPACES, \n merge=merge, \n delete=delete, \n auto_add=auto_add, \n )\n if el is not None:\n return type(self).wrap(el)" }, { "identifier": "NAMESPACES", "path": "epub3/util/proxy.py", "snippet": "NAMESPACES: Final = {\n \"containerns\": \"urn:oasis:names:tc:opendocument:xmlns:container\", \n \"daisy\": \"http://www.daisy.org/z3986/2005/ncx/\", \n \"dc\": \"http://purl.org/dc/elements/1.1/\", \n \"ds\": \"http://www.w3.org/2000/09/xmldsig#\", \n \"epub\": \"http://www.idpf.org/2007/ops\", \n \"enc\": \"http://www.w3.org/2001/04/xmlenc#\",\n \"ncx\": \"http://www.daisy.org/z3986/2005/ncx/\", \n \"ns\": \"http://www.idpf.org/2016/encryption#compression\", \n \"opf\": \"http://www.idpf.org/2007/opf\", \n \"rdf\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\", \n \"smil\": \"http://www.w3.org/ns/SMIL\", \n \"svg\": \"http://www.w3.org/2000/svg\", \n \"html\": \"http://www.w3.org/1999/xhtml\", \n \"wsdl\": \"http://schemas.xmlsoap.org/wsdl/\", \n \"xhtml\": \"http://www.w3.org/1999/xhtml\", \n \"xlink\": \"http://www.w3.org/1999/xlink\", \n \"xml\": \"http://www.w3.org/XML/1998/namespace\", \n \"xs\": \"http://www.w3.org/2001/XMLSchema\", \n \"xsi\": \"http://www.w3.org/2001/XMLSchema-instance\", \n}" }, { "identifier": "remap_links", "path": "epub3/util/remap.py", "snippet": "def remap_links(\n manifest, \n pathmap, \n encoding=\"utf-8\", \n link_patterns=LINK_PATTERNS, \n):\n changed = []\n for predicate, patterns in link_patterns:\n for item in 
manifest.filter_by_attr(predicate):\n try:\n text = item.read_text(encoding=encoding)\n href = unquote(item[\"href\"])\n basedir = dirname(href)\n if type(patterns) is list:\n ls = []\n for subpats in patterns:\n repls = list(path_repl_iter(chain_finditer(text, subpats), pathmap, basedir))\n if repls:\n ls.append(repls)\n if not ls:\n repls = None\n elif len(ls) > 1:\n repls = sorted(chain.from_iterable(ls))\n else:\n repls = ls[0]\n else:\n repls = list(path_repl_iter(chain_finditer(text, patterns), pathmap, basedir))\n if repls:\n text = \"\".join(apply_repl_iter(text, repls))\n item.write_text(text, encoding=encoding)\n changed.append(href)\n except:\n pass\n return changed" }, { "identifier": "PyLinq", "path": "epub3/util/stream.py", "snippet": "class PyLinq(Stream, AggregateMixin, ItertoolsMixin):\n\n def __init__(self, iterable=None):\n if iterable is None:\n iterable = []\n super().__init__(iterable)\n\n def iter(self):\n return self @ iter(self.iterable)\n\n def reversed(self):\n return self @ reversed(self.iterable)\n\n def length(self):\n return self @ len(self.iterable)\n\n def add(self, element):\n return self.chain((element,))\n\n def all_equal(self):\n \"Returns True if all the elements are equal to each other\"\n g = iter(self.groupby())\n return next(g, True) and not next(g, False)\n\n def contains(self, element, key=None):\n return element in self.map(key)\n\n def difference(self, other, key=None, left_key=None, right_key=None):\n other = (self @ other).map(key or right_key)\n selectors = self.map(key or left_key).notin(other)\n return self.compress(selectors)\n\n @typed_method\n def distinct(self, key=None):\n # A simpler but not equivalent implementation as following:\n # return self @ self.group_by(key).each.first()\n hashable, unhashable = set(), []\n for i, k in self.pair(key):\n if k not in hashable and k not in unhashable:\n try:\n hashable.add(k)\n except TypeError:\n unhashable.append(k)\n yield i\n\n def element_at(self, n, default=undefined):\n try:\n return self[n]\n except TypeError as exc:\n if type(n) is int:\n if n >= 0:\n r = tuple(self.islice(n, n+1))\n if r:\n return r[0]\n else:\n r = deque(self, -n)\n if len(r) == -n:\n return r[0]\n if default is not undefined:\n return default\n raise LookupError(f'No element found at {n!r}') from exc\n\n def first(self, default=undefined):\n # self.element_at(0, default)\n if default is undefined:\n try:\n return next(iter(self))\n except StopIteration as exc:\n raise LookupError('No such first element') from exc\n return next(iter(self), default)\n\n def first_true(self, default=None, predicate=None):\n \"\"\"Returns the first true value in the iterable.\n\n If no true value is found, returns *default*\n\n If *predicate* is not None, returns the first item\n for which predicate(item) is true.\n\n \"\"\"\n return next(iter(self.filter(predicate)), default)\n\n @typed_method\n def flatten(list_of_lists):\n \"Flatten one level of nesting\"\n return itertools.chain.from_iterable(self.iterable)\n\n def group_by(self, key=None):\n groupers = self.orderby(key=key).groupby(key=key)\n return groupers.map(lambda args: Grouper.make_grouper(*args))\n\n @typed_method\n def group_join(self, other, key=None, left_key=None, right_key=None):\n left_key, right_key = key or left_key, key or right_key\n left = {i.key: tuple(i) for i in self.group_by(left_key)}\n right = {i.key: tuple(i) for i in (self @ other).group_by(right_key)}\n for k in sorted(left.keys() | right.keys()):\n grouper = itertools.product(left.get(k, ()), 
right.get(k, ()))\n yield Grouper.make_grouper(k, grouper)\n\n def intersection(self, other, key=None, left_key=None, right_key=None):\n return self.join(other, key, left_key, right_key).map(lambda x: x[0])\n\n def isin(self, other):\n if isinstance(other, Stream):\n other = other.data\n if not isinstance(other, (Set, Mapping)):\n if not isinstance(other, Sequence):\n other = tuple(other)\n try:\n other = set(other)\n except TypeError:\n pass\n return self.map(lambda x: x in other)\n\n def join(self, other, key=None, left_key=None, right_key=None):\n left_key = key or left_key or identity_function\n right_key = key or right_key or identity_function\n judge = lambda x: left_key(x[0]) == right_key(x[1])\n return self.product(other).filter(judge)\n\n def last(self, default=undefined):\n # self.element_at(-1, default)\n value = default\n for value in self: pass\n if value is undefined:\n raise LookupError('No such last element')\n return value\n\n @typed_method\n def ncycles(self, n):\n \"Returns the sequence elements n times\"\n return itertools.chain.from_iterable(itertools.repeat(tuple(self.iterable), n))\n\n def nth(self, n, default=undefined):\n \"Returns the nth item or a default value\"\n if isinstance(self.iterable, Sequence):\n try:\n return self.iterable[n]\n except LookupError:\n if default is undefined:\n raise\n return default\n try:\n return next(iter(self.islice(n, None)))\n except StopIteration as e:\n if default is undefined:\n raise LookupError(n) from e\n return default\n\n @typed_method\n def prepend(self, *values):\n \"Prepend a single value in front of an iterator\"\n return itertools.chain(values, self.iterable)\n\n def take(self, n):\n return self.islice(n)\n\n def notin(self, other):\n return self.isin(other).map(lambda x: not x)\n\n def orderby(self, key=None, reverse=False):\n return self.collect(sorted, key=key, reverse=reverse)\n\n def order_by(self, kwargs_orders=None, reverse_orders=False):\n data = list(self)\n if kwargs_orders:\n if reverse_orders:\n kwargs_orders = reversed(kwargs_orders)\n for kwargs in kwargs_orders:\n data.sort(**kwargs)\n return self @ data\n\n @typed_method\n def pair(self, key=None):\n if key is None:\n for i in self:\n yield i, i\n else:\n for i in self:\n yield i, key(i)\n\n def select(self, selector=None):\n return self.map(selector)\n\n def select_many(self, selector=None):\n return self.map(selector).chain_self_iterable()\n\n def single(self, default=undefined):\n n = 0\n for n, v in zip(range(1, 3), self): pass\n if n == 0:\n if default is not undefined:\n return default\n raise LookupError('No elements exception occured')\n elif n == 2:\n raise LookupError('More than one element exception occured')\n return v\n\n def skip(self, n):\n return self.islice(n, None)\n\n def skipwhile(self, predicate):\n return self.dropwhile(predicate)\n\n def tail(self, n):\n return self.collect(deque, n)\n\n def where(self, predicate=None):\n return self.filter(predicate)\n\n def zip(self, *iterables):\n return zip(self, *iterables)" }, { "identifier": "el_add", "path": "epub3/util/xml.py", "snippet": "def el_add(\n el: Element, \n /, \n name: str, \n attrib: Optional[Mapping] = None, \n text=None, \n tail=None, \n namespaces: Optional[Mapping] = None, \n) -> Element:\n \"\"\"\n \"\"\"\n name = extract_name(name)\n if not name:\n raise ValueError(\"unable to determine name\")\n try:\n nsmap = el.nsmap # type: ignore\n except:\n nsmap = {}\n if attrib:\n attrib0 = items(attrib)\n attrib = {}\n for key, val in attrib0:\n if key is None:\n attrib[key] = 
val\n elif isinstance(key, str):\n if key == \"xmlns\":\n if val:\n nsmap[None] = val\n else:\n nsmap.pop(None, None)\n elif key.startswith(\"xmlns:\"):\n if val:\n nsmap[key[6:]] = val\n else:\n nsmap.pop(key[6:], None)\n else:\n attrib[key] = val\n name = resolve_prefix(name, nsmap, namespaces, inherit=True)\n if USE_BUILTIN_XML:\n sel = el.makeelement(name, cast(dict[str, str], {}))\n else:\n sel = el.makeelement(name, nsmap=cast(dict[str, str], nsmap))\n el.append(sel)\n _el_set(sel, attrib, text, tail, nsmap, namespaces)\n return sel" }, { "identifier": "el_del", "path": "epub3/util/xml.py", "snippet": "def el_del(\n el: Element, \n path: Optional[str] = None, \n /, \n namespaces: Optional[Mapping] = None, \n) -> Optional[Element]:\n \"\"\"\n \"\"\"\n sel = el_find(el, path, namespaces) if path else el\n if sel is not None:\n try:\n pel = sel.getparent() # type: ignore\n except AttributeError:\n pel = el\n if pel is None or pel is sel:\n raise LookupError(f\"can't get parent element: {sel!r}\")\n pel.remove(sel)\n return sel" }, { "identifier": "el_iterfind", "path": "epub3/util/xml.py", "snippet": "def el_iterfind(\n el: Element, \n path: Optional[str] = None, \n /, \n namespaces: Optional[Mapping] = None, \n) -> Iterator[Element]:\n \"\"\"\n \"\"\"\n if not path or path in (\".\", \"*..\", \"*...\", \"./.\"):\n return iter((el,))\n nsmap: Optional[Mapping]\n if USE_BUILTIN_XML:\n nsmap = namespaces\n else:\n nsmap = el.nsmap\n if namespaces:\n nsmap.update(namespaces)\n if nsmap and (None in nsmap or \"\" in nsmap):\n if any(\n l == \"[\" and r != \"@\" \n for l, r in pairwise(m[0] for m in xpath_tokenizer_re.finditer(path))\n ):\n uri = get(nsmap, None) or get(nsmap, \"\") or \"*\"\n path = generalize_elementpath(path, uri=uri)\n nsmap = {k: v for k, v in items(nsmap) if k and v}\n return el.iterfind(path, nsmap) # type: ignore" }, { "identifier": "el_set", "path": "epub3/util/xml.py", "snippet": "def el_set(\n el: Element, \n path: Optional[str] = None, \n /, \n name: Optional[str] = None, \n attrib: Optional[Mapping] = None, \n text: Optional[str] = None, \n tail: Optional[str] = None, \n namespaces: Optional[Mapping] = None, \n merge: bool = False, \n) -> Element:\n \"\"\"\n \"\"\"\n sel = el_find(el, path, namespaces) if path else el\n if sel is not None:\n if text is None and tail is None and not attrib:\n return sel\n try:\n nsmap = sel.nsmap # type: ignore\n except:\n nsmap = None\n (_el_setmerge if merge else _el_set)(sel, attrib, text, tail, nsmap, namespaces)\n elif name is not None:\n if name == \"\":\n name = path\n sel = el_add(el, cast(str, name), attrib=attrib, text=text, tail=tail, namespaces=namespaces)\n else:\n raise LookupError(f\"element not found: {el!r}.find({path!r}) is None\")\n return sel" }, { "identifier": "undefined", "path": "epub3/util/undefined.py", "snippet": "class UndefinedType:\r\n def __new__(cls, /):\r\n def __init_subclass__(cls, /, **kwargs):\r\n def __eq__(self, other, /):\r" } ]
import errno
import io
import os
import os.path as ospath
import posixpath
from copy import deepcopy
from datetime import datetime
from fnmatch import translate as wildcard_translate
from functools import cached_property, partial
from inspect import getfullargspec, isclass
from io import IOBase, TextIOWrapper
from operator import methodcaller
from os import fsdecode, remove, stat, stat_result, PathLike
from pathlib import PurePosixPath
from posixpath import join as joinpath, normpath
from pprint import pformat
from re import compile as re_compile, escape as re_escape, Pattern
from shutil import copy, copyfileobj
from typing import cast, Any, Callable, Container, Mapping, MutableMapping, Optional
from types import MappingProxyType
from uuid import uuid4
from warnings import warn
from weakref import WeakKeyDictionary, WeakValueDictionary
from urllib.parse import quote, unquote
from zipfile import ZipFile, ZIP_STORED
from .util.file import File, RootFS, TemporaryFS, OPEN_MODES
from .util.helper import guess_media_type, values, items, sup
from .util.proxy import proxy_property, ElementAttribProxy, ElementProxy, NAMESPACES
from .util.remap import remap_links
from .util.stream import PyLinq
from .util.xml import el_add, el_del, el_iterfind, el_set
from .util.undefined import undefined, UndefinedType
from lxml.etree import fromstring, tostring, _Element as Element, _ElementTree as ElementTree # type: ignore
from xml.etree.ElementTree import fromstring, tostring, Element, ElementTree # type: ignore
13,836
): if mode not in OPEN_MODES: raise ValueError(f"invalid open mode: {mode!r}") if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" href_to_file = self._href_to_file if href in self._href_to_id: if "x" in mode: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") file = href_to_file.get(href) uid = str(uuid4()) if file is None: href_to_file[href] = file = File(uid, self._workfs) elif not file.check_open_mode(mode): if "w" not in mode: try: fsrc = file.open("rb", buffering=0) except FileNotFoundError: if "r" in mode: raise else: with fsrc: copyfileobj(fsrc, self._workfs.open(uid, "wb")) href_to_file[href] = file = File(uid, self._workfs) elif "r" in mode: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") else: item = self.add(href) file = href_to_file[href] if "b" not in mode and encoding is None: encoding = "utf-8" return file.open( mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, href, /, buffering=0): with self.open(href, "rb", buffering=buffering) as f: return f.read() read_bytes = read def read_text(self, href, /, encoding=None): with self.open(href, "r", encoding=encoding) as f: return f.read() def remove(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") item = super().pop(id, None) if item is not None: try: self._root.remove(item._root) except: pass file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass def _rename(self, item, href, dest_href, /): try: id = self._href_to_id[dest_href] = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") if item is None: item = super().__getitem__(id) item._attrib["href"] = quote(dest_href, safe=":/?&=#") self._href_to_file[dest_href] = self._href_to_file.pop(href, None) def rename(self, href, dest_href, /, repair=False): result = {} if isinstance(href, Item): item = href if item not in self: raise LookupError(f"no such item: {item!r}") href = unquote(item._attrib["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" item = None if isinstance(dest_href, (bytes, PathLike)): dest_href = fsdecode(dest_href) else: dest_href = str(dest_href) assert (dest_href := dest_href.strip("/")), "empty href" result["pathpair"] = (href, dest_href) if href != dest_href: if dest_href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"target file exists: {dest_href!r}") self._rename(item, href, dest_href) if repair:
#!/usr/bin/env python # coding: utf-8 __author__ = "ChenyangGao <https://chenyanggao.github.io>" __version__ = (0, 0, 1) __all__ = ["ePub", "Metadata", "DCTerm", "Meta", "Link", "Manifest", "Item", "Spine", "Itemref"] try: except ModuleNotFoundError: class DCTerm(ElementProxy): pass class Meta(ElementProxy): __protected_keys__ = ("property",) __optional_keys__ = ("dir", "id", "refines", "scheme", "xml:lang") class Link(ElementAttribProxy): __protected_keys__ = ("href", "rel") __optional_keys__ = ("hreflang", "id", "media-type", "properties", "refines") class Item(ElementAttribProxy): __const_keys__ = ("id",) __protected_keys__ = ("href", "media-type") __optional_keys__ = ("fallback", "media-overlay", "properties") __cache_get_state__ = lambda _, manifest: manifest def __init__(self, root: Element, manifest, /): super().__init__(root) self._manifest = manifest def __eq__(self, other, /): if type(self) is not type(other): return NotImplemented return self._manifest is other._manifest and self._attrib["href"] == other._attrib["href"] def __fspath__(self, /): return unquote(self._attrib["href"]) def __hash__(self, /): return hash((self._root, id(self._manifest))) def __setitem__(self, key, value, /): if key == "href": if value is None: raise ValueError("can't set href to None") self.rename(val) else: super().__setitem__(key, value) return self @property def filename(self, /): return PurePosixPath(joinpath(self.home, self)) @property def home(self, /): return PurePosixPath(self._manifest._epub._opf_dir) @property def name(self, /): return self.path.name @property def path(self, /): return PurePosixPath(self) @property def _parent(self, /): return posixpath.dirname(unquote(self._attrib["href"])) @property def parent(self, /): return self.path.parent @property def parents(self, /): return self.path.parents @property def parts(self, /): return self.path.parts @property def stem(self, /): return self.path.stem @property def suffix(self, /): return self.path.suffix @property def suffixes(self, /): return self.path.suffixes def update(self, attrib=None, /, **attrs): if attrib: attrib = dict(attrib) if attrs: attrib.update(attrs) else: attrib = attrs href = attrib.pop("href", None) if href: self.rename(href) if attrib: super().update(attrib) return self def is_relative_to(self, /, *other): return self.path.is_relative_to(*other) def joinpath(self, /, *others): return PurePosixPath(normpath(joinpath(self._parent, *others))) __truediv__ = joinpath def relpath(self, other, /): return PurePosixPath(posixpath.relpath(other, self._parent)) def relative_to(self, /, *other): return self.path.relative_to(*other) def with_name(self, /, name): return self.path.with_name(str(name)) def with_stem(self, /, stem): return self.path.with_stem(str(stem)) def with_suffix(self, /, suffix): return self.path.with_suffix(str(suffix)) def exists(self, /): return self._manifest.exists(self) def is_file(self, /): return self.exists() def is_dir(self, /): return False def is_symlink(self, /): return False def glob(self, /, pattern="*", ignore_case=False): return self._manifest.glob(pattern, self, ignore_case=ignore_case) def rglob(self, /, pattern="", ignore_case=False): return self._manifest.rglob(pattern, self, ignore_case=ignore_case) def iterdir(self, /): return self._manifest.iterdir(self) def match(self, /, path_pattern, ignore_case=False): path_pattern = path_pattern.strip("/") if not path_pattern: return False pattern = joinpath(*posix_glob_translate_iter(path_pattern)) if ignore_case: pattern = "(?i:%s)" % pattern 
return re_compile(pattern).fullmatch(self._attrib["href"]) is not None def open( self, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): return self._manifest.open( self, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, /, buffering=0): return self._manifest.read(self, buffering=buffering) read_bytes = read def read_text(self, /, encoding=None): return self._manifest.read_text(self, encoding=encoding) def remove(self, /): self._manifest.remove(self) return self def rename(self, dest_href, /, repair=False): return self._manifest.rename(self, dest_href, repair=repair) def batch_rename(self, mapper, /, predicate=None, repair=False): return self._manifest.batch_rename(self, mapper, predicate=predicate, repair=repair) def replace(self, href, /): self._manifest.replace(self, href) return self def stat(self, /) -> Optional[stat_result]: return self._manifest.stat(self) def touch(self, /): self._manifest.touch(self) return self unlink = remove def write(self, /, data): return self._manifest.write(self, data) write_bytes = write def write_text(self, /, text, encoding=None, errors=None, newline=None): return self._manifest.write_text(self, text, encoding=encoding, errors=errors, newline=newline) class Itemref(ElementAttribProxy): __const_keys__ = ("idref",) __optional_keys__ = ("id", "linear", "properties") @property def linear(self, /): return "no" if self._attrib.get("linear") == "no" else "yes" @linear.setter def linear(self, value, /): self._attrib["linear"] = "no" if value == "no" else "yes" class Metadata(ElementProxy): __wrap_class_map__ = {"{*}meta": Meta, "{*}": Link, "dc:*": DCTerm} def __repr__(self, /): return f"{super().__repr__()}\n{pformat(self.iter().list())}" @property def info(self, /): return tuple(meta.info for meta in self.iter()) def add( self, name: str = "meta", /, attrib: Optional[Mapping] = None, text: Optional[str] = None, tail: Any = undefined, **_disregards, ): return super().add(name, attrib=attrib, text=text) def dc( self, name: str, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if text_value is not undefined: if find_attrib: find_attrib = {**find_attrib, "": text_value} else: find_attrib = {"": text_value} return self.setfind( "dc:%s" % name, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def meta( self, preds: str = "", /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): return self.setfind( "{*}meta%s" % preds, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def name_meta( self, name, content: Optional[str] = None, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "name": name} else: find_attrib = {"name": name} if content is not None: find_attrib["content"] = content return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def property_meta( self, property, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: 
Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "property": property} else: find_attrib = {"property": property} if text_value is not undefined: find_attrib[""] = text_value return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) class ManifestProxy(ElementAttribProxy): __optional_keys__ = ("id",) class Manifest(dict[str, Item]): def __init__(self, /, root: Element, epub): self._root = root self._attrib = root.attrib self._epub = epub self._proxy = ManifestProxy(root) self._href_to_id: dict[str, str] = {} self._href_to_file: dict[str, File] = {} if len(root): href_to_id = self._href_to_id dangling_items = [] for item in root.iterfind("{*}item"): id = item.attrib.get("id") href = item.attrib.get("href") if id is None or not href: dangling_items.append(item) continue id = cast(str, id) href = cast(str, unquote(href)) super().__setitem__(id, Item(item, self)) href_to_id[href] = id if dangling_items: for item in reversed(dangling_items): root.remove(item) warn(f"removed a dangling item element: {item!r}") zfile = epub.__dict__.get("_zfile") opf_dir = epub._opf_dir if zfile: href_to_file = self._href_to_file for href in href_to_id: zpath = joinpath(opf_dir, href) zinfo = zfile.NameToInfo.get(zpath) if not zinfo or zinfo.is_dir(): warn(f"missing file in original epub: {href!r}") href_to_file[href] = File(str(uuid4()), self._workfs) else: href_to_file[href] = File(zpath, zfile, open_modes="r") def __init_subclass__(self, /, **kwargs): raise TypeError("subclassing is not allowed") def __call__(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") return href if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id[href] except LookupError as e: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") from e return super().__getitem__(id) def __contains__(self, other, /): if isinstance(other, Item): return other._manifest is self and super().__contains__(other["id"]) return super().__contains__(other) def __delitem__(self, key, /): pop = self.pop if isinstance(key, int): el = self._root[key] try: id = el.attrib["id"] except AttributeError: try: self._root.remove(el) except: pass else: pop(id) elif isinstance(key, slice): root = self._root for el in root[key]: try: id = el.attrib["id"] except AttributeError: try: root.remove(el) except: pass else: pop(id, None) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") pop(key["id"]) elif isinstance(key, str): pop(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") return self def __getitem__(self, key, /): def wrap(el): try: if el.tag == "item" or el.tag.endswith("}item"): return Item(el, self) return ElementProxy(el) except AttributeError: return el if isinstance(key, int): return wrap(self._root[key]) elif isinstance(key, slice): return list(map(wrap, self._root[key])) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") return key elif isinstance(key, str): return super().__getitem__(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") def __setitem__(self, id, value, /): if id not in self: raise LookupError(f"no such item: {id!r}") if isinstance(id, Item): item = id else: item 
= super().__getitem__(id) href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return self @cached_property def _workfs(self, /): if self._epub._maketemp: return TemporaryFS(self._epub._workroot) else: return RootFS(self._epub._workroot) @cached_property def href_to_id(self, /): return MappingProxyType(self._href_to_id) @cached_property def href_to_file(self, /): return MappingProxyType(self._href_to_file) @property def home(self, /): return self._epub._opf_dir @property def attrib(self, /): return self._attrib @property def proxy(self, /): return self._proxy @property def info(self, /): return tuple(item.info for item in self.values()) delete = __delitem__ def clear(self, /): self._root.clear() self._href_to_file.clear() self._href_to_id.clear() super().clear() return self def pop(self, id, /, default=undefined): if id not in self: if default is undefined: raise LookupError(f"no such item: {id!r}") return default if isinstance(id, Item): id = id["id"] item = super().pop(id) try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return item def popitem(self, /): id, item = super().popitem() try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return id, item def set(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(href, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return item def setdefault(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(value, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: if isinstance(value, Mapping) and not ("open" in value and callable(value["open"])): item.merge(value) return item def merge(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.merge(attrib=attrs) 
elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") 
href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"): el.attrib["media-type"] = guess_media_type(href) if id is None: yield self.add(href) elif super().__contains__(id): item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") yield item else: try: self._root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass def list(self, /, mapfn=None): if mapfn is None: return list(self.iter()) return list(map(mapfn, self.iter())) def audio_iter(self, /): return self.filter_by_attr("^audio/") def css_iter(self, /): return self.filter_by_attr("text/css") def font_iter(self, /): return self.filter_by_attr(("^font/", "^application/font-")) def image_iter(self, /): return self.filter_by_attr("^image/") def javascript_iter(self, /): return self.filter_by_attr(("text/javascript", "application/javascript", "application/ecmascript")) def media_iter(self, /): return self.filter_by_attr(("^audio/", "^image/", "^video/")) def text_iter(self, /): return self.filter_by_attr(("^text/", "$+xml")) def video_iter(self, /): return self.filter_by_attr("^video/") @PyLinq.streamify def html_item_ref_pair_iter(self, /): spine = self._epub.spine for id, itemref in spine.items(): yield self[id], itemref for item in self.filter_by_attr(("text/html", "application/xhtml+xml")): if item["id"] in spine: continue yield item, None #################### File System Methods #################### def add( self, href, /, file=None, fs=None, open_modes="r", id=None, media_type=None, attrib=None, ): if isinstance(href, Item): raise TypeError("can't directly add `Item` object") if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" if href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") uid = str(uuid4()) if id is None: generate_id = self._epub._generate_id if generate_id is None: id = uid else: keys = self.keys() id = generate_id(href, keys) while id in keys: nid = generate_id(href, keys) if nid == id: i = sup(lambda i: f"{i}_{nid}" in keys) id = f"{i}_{nid}" break id = nid if id in self: raise LookupError(f"id already exists: {id!r}") attrib = dict(attrib) if attrib else {} attrib["id"] = id attrib["href"] = quote(href, safe=":/?&=#") if media_type: attrib["media-type"] = media_type if fs is not None: file = File(file, fs=fs, open_modes=open_modes) elif file is None: file = File(uid, self._workfs) elif isinstance(file, IOBase) or hasattr(file, "read") and not hasattr(file, "open"): file0 = file file = File(uid, self._workfs) test_data = file0.read(0) if test_data == b"": copyfileobj(file0, self._workfs.open(uid, "wb")) elif test_data == "": attrib.setdefault("media-type", "text/plain") copyfileobj(file0, self._workfs.open(uid, "w")) else: raise TypeError(f"incorrect read behavior: {file0!r}") else: file = File(file, open_modes=open_modes) if not attrib.get("media-type"): attrib["media-type"] = guess_media_type(href) item = Item(el_add(self._root, "item", attrib=attrib, 
namespaces=NAMESPACES), self) super().__setitem__(id, item) self._href_to_id[href] = id self._href_to_file[href] = file return item def change( self, href, /, file=None, fs=None, open_modes="r", id=None, media_type=None, attrib=None, ): if fs is self._workfs: raise OSError(errno.EINVAL, f"Remapping the file that in the working fs is not supported, use `rename` instead: {fs!r}") if href in self.href_to_id: item = self[self.href_to_id[href]] if attrib: item.update(attrib) if media_type: item.media_type = media_type try: self.href_to_file[href].remove() except: pass self._href_to_file[href] = File(file, fs, open_modes) return item else: return self.add( href, file=file, fs=fs, open_modes=open_modes, id=id, media_type=media_type, attrib=attrib, ) def exists(self, href, /): if isinstance(href, Item): return href in self if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" return href in self._href_to_id @PyLinq.streamify def glob(self, pattern="*", dirname="", ignore_case=False): pattern = pattern.strip("/") if not pattern: return if isinstance(dirname, Item): dirname = posixpath.dirname(unquote(href._attrib["href"])) else: dirname = dirname.strip("/") if dirname: dirname = re_escape(dirname) pattern = joinpath(dirname, *posix_glob_translate_iter(pattern)) if ignore_case: pattern = "(?i:%s)" % pattern matches = re_compile(pattern).fullmatch for href, id in self._href_to_id.items(): if not matches(href): continue try: yield super().__getitem__(id) except KeyError: pass @PyLinq.streamify def iterdir(self, /, dirname=""): if isinstance(dirname, Item): dirname = posixpath.dirname(unquote(href._attrib["href"])) else: dirname = dirname.strip("/") for href, id in self._href_to_id.items(): if posixpath.dirname(href) != dirname: continue try: yield super().__getitem__(id) except KeyError: pass def open( self, href, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): if mode not in OPEN_MODES: raise ValueError(f"invalid open mode: {mode!r}") if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" href_to_file = self._href_to_file if href in self._href_to_id: if "x" in mode: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") file = href_to_file.get(href) uid = str(uuid4()) if file is None: href_to_file[href] = file = File(uid, self._workfs) elif not file.check_open_mode(mode): if "w" not in mode: try: fsrc = file.open("rb", buffering=0) except FileNotFoundError: if "r" in mode: raise else: with fsrc: copyfileobj(fsrc, self._workfs.open(uid, "wb")) href_to_file[href] = file = File(uid, self._workfs) elif "r" in mode: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") else: item = self.add(href) file = href_to_file[href] if "b" not in mode and encoding is None: encoding = "utf-8" return file.open( mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, href, /, buffering=0): with self.open(href, "rb", buffering=buffering) as f: return f.read() read_bytes = read def read_text(self, href, /, encoding=None): with self.open(href, "r", encoding=encoding) as f: return f.read() def remove(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, 
(bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") item = super().pop(id, None) if item is not None: try: self._root.remove(item._root) except: pass file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass def _rename(self, item, href, dest_href, /): try: id = self._href_to_id[dest_href] = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") if item is None: item = super().__getitem__(id) item._attrib["href"] = quote(dest_href, safe=":/?&=#") self._href_to_file[dest_href] = self._href_to_file.pop(href, None) def rename(self, href, dest_href, /, repair=False): result = {} if isinstance(href, Item): item = href if item not in self: raise LookupError(f"no such item: {item!r}") href = unquote(item._attrib["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" item = None if isinstance(dest_href, (bytes, PathLike)): dest_href = fsdecode(dest_href) else: dest_href = str(dest_href) assert (dest_href := dest_href.strip("/")), "empty href" result["pathpair"] = (href, dest_href) if href != dest_href: if dest_href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"target file exists: {dest_href!r}") self._rename(item, href, dest_href) if repair:
result["repairs"] = remap_links(self, (href, dest_href))
12
2023-11-20 14:46:41+00:00
16k
ymp5078/AI-SAM
segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. 
For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. 
If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) 
to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. 
For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = 
diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np
import torch
import cv2  # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
from typing import Any, Dict, List, Optional, Tuple

from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)
from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
10,971
            ann = {
                "segmentation": mask_data["segmentations"][idx],
                "area": area_from_rle(mask_data["rles"][idx]),
                "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
                "predicted_iou": mask_data["iou_preds"][idx].item(),
                "point_coords": [mask_data["points"][idx].tolist()],
                "stability_score": mask_data["stability_score"][idx].item(),
                "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
            }
            curr_anns.append(ann)

        return curr_anns

    def _generate_masks(self, image: np.ndarray) -> MaskData:
        orig_size = image.shape[:2]
        crop_boxes, layer_idxs = generate_crop_boxes(
            orig_size, self.crop_n_layers, self.crop_overlap_ratio
        )

        # Iterate over image crops
        data = MaskData()
        for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
            crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
            data.cat(crop_data)

        # Remove duplicate masks between crops
        if len(crop_boxes) > 1:
            # Prefer masks from smaller crops
            scores = 1 / box_area(data["crop_boxes"])
            scores = scores.to(data["boxes"].device)
            keep_by_nms = batched_nms(
                data["boxes"].float(),
                scores,
                torch.zeros_like(data["boxes"][:, 0]),  # categories
                iou_threshold=self.crop_nms_thresh,
            )
            data.filter(keep_by_nms)

        data.to_numpy()
        return data

    def _process_crop(
        self,
        image: np.ndarray,
        crop_box: List[int],
        crop_layer_idx: int,
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        # Crop the image and calculate embeddings
        x0, y0, x1, y1 = crop_box
        cropped_im = image[y0:y1, x0:x1, :]
        cropped_im_size = cropped_im.shape[:2]
        self.predictor.set_image(cropped_im)

        # Get points for this crop
        points_scale = np.array(cropped_im_size)[None, ::-1]
        points_for_image = self.point_grids[crop_layer_idx] * points_scale

        # Generate masks for this crop in batches
        data = MaskData()
        for (points,) in batch_iterator(self.points_per_batch, points_for_image):
            batch_data = self._process_batch(
                points, cropped_im_size, crop_box, orig_size
            )
            data.cat(batch_data)
            del batch_data
        self.predictor.reset_image()

        # Remove duplicates within this crop.
        keep_by_nms = batched_nms(
            data["boxes"].float(),
            data["iou_preds"],
            torch.zeros_like(data["boxes"][:, 0]),  # categories
            iou_threshold=self.box_nms_thresh,
        )
        data.filter(keep_by_nms)

        # Return to the original image frame
        data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
        data["points"] = uncrop_points(data["points"], crop_box)
        data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])

        return data

    def _process_batch(
        self,
        points: np.ndarray,
        im_size: Tuple[int, ...],
        crop_box: List[int],
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        orig_h, orig_w = orig_size

        # Run model on this batch
        transformed_points = self.predictor.transform.apply_coords(points, im_size)
        in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
        in_labels = torch.ones(
            in_points.shape[0], dtype=torch.int, device=in_points.device
        )
        masks, iou_preds, _ = self.predictor.predict_torch(
            in_points[:, None, :],
            in_labels[:, None],
            multimask_output=True,
            return_logits=True,
        )

        # Serialize predictions and store in MaskData
        data = MaskData(
            masks=masks.flatten(0, 1),
            iou_preds=iou_preds.flatten(0, 1),
            points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
        )
        del masks

        # Filter by predicted IoU
        if self.pred_iou_thresh > 0.0:
            keep_mask = data["iou_preds"] > self.pred_iou_thresh
            data.filter(keep_mask)

        # Calculate stability score
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


class SamAutomaticMaskGenerator:
    def __init__(
        self,
        model: Sam,
        points_per_side: Optional[int] = 32,
        points_per_batch: int = 64,
        pred_iou_thresh: float = 0.88,
        stability_score_thresh: float = 0.95,
        stability_score_offset: float = 1.0,
        box_nms_thresh: float = 0.7,
        crop_n_layers: int = 0,
        crop_nms_thresh: float = 0.7,
        crop_overlap_ratio: float = 512 / 1500,
        crop_n_points_downscale_factor: int = 1,
        point_grids: Optional[List[np.ndarray]] = None,
        min_mask_region_area: int = 0,
        output_mode: str = "binary_mask",
    ) -> None:
        """
        Using a SAM model, generates masks for the entire image.
        Generates a grid of point prompts over the image, then filters
        low quality and duplicate masks. The default settings are chosen
        for SAM with a ViT-H backbone.

        Arguments:
          model (Sam): The SAM model to use for mask prediction.
          points_per_side (int or None): The number of points to be sampled
            along one side of the image. The total number of points is
            points_per_side**2. If None, 'point_grids' must provide explicit
            point sampling.
          points_per_batch (int): Sets the number of points run simultaneously
            by the model. Higher numbers may be faster but use more GPU memory.
          pred_iou_thresh (float): A filtering threshold in [0,1], using the
            model's predicted mask quality.
          stability_score_thresh (float): A filtering threshold in [0,1], using
            the stability of the mask under changes to the cutoff used to
            binarize the model's mask predictions.
          stability_score_offset (float): The amount to shift the cutoff when
            calculated the stability score.
          box_nms_thresh (float): The box IoU cutoff used by non-maximal
            suppression to filter duplicate masks.
          crop_n_layers (int): If >0, mask prediction will be run again on
            crops of the image. Sets the number of layers to run, where each
            layer has 2**i_layer number of image crops.
          crop_nms_thresh (float): The box IoU cutoff used by non-maximal
            suppression to filter duplicate masks between different crops.
          crop_overlap_ratio (float): Sets the degree to which crops overlap.
            In the first crop layer, crops will overlap by this fraction of
            the image length. Later layers with more crops scale down this overlap.
          crop_n_points_downscale_factor (int): The number of points-per-side
            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
          point_grids (list(np.ndarray) or None): A list over explicit grids
            of points used for sampling, normalized to [0,1]. The nth grid in the
            list is used in the nth crop layer. Exclusive with points_per_side.
          min_mask_region_area (int): If >0, postprocessing will be applied
            to remove disconnected regions and holes in masks with area smaller
            than min_mask_region_area. Requires opencv.
          output_mode (str): The form masks are returned in. Can be 'binary_mask',
            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
            For large resolutions, 'binary_mask' may consume large amounts of memory.
        """

        assert (points_per_side is None) != (
            point_grids is None
        ), "Exactly one of points_per_side or point_grid must be provided."
        if points_per_side is not None:
            self.point_grids = build_all_layer_point_grids(
                points_per_side,
                crop_n_layers,
                crop_n_points_downscale_factor,
            )
        elif point_grids is not None:
            self.point_grids = point_grids
        else:
            raise ValueError("Can't have both points_per_side and point_grid be None.")

        assert output_mode in [
            "binary_mask",
            "uncompressed_rle",
            "coco_rle",
        ], f"Unknown output_mode {output_mode}."
        if output_mode == "coco_rle":

        if min_mask_region_area > 0:

        self.predictor = SamPredictor(model)
        self.points_per_batch = points_per_batch
        self.pred_iou_thresh = pred_iou_thresh
        self.stability_score_thresh = stability_score_thresh
        self.stability_score_offset = stability_score_offset
        self.box_nms_thresh = box_nms_thresh
        self.crop_n_layers = crop_n_layers
        self.crop_nms_thresh = crop_nms_thresh
        self.crop_overlap_ratio = crop_overlap_ratio
        self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
        self.min_mask_region_area = min_mask_region_area
        self.output_mode = output_mode

    @torch.no_grad()
    def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
        """
        Generates masks for the given image.

        Arguments:
          image (np.ndarray): The image to generate masks for, in HWC uint8 format.

        Returns:
          list(dict(str, any)): A list over records for masks. Each record is
            a dict containing the following keys:
              segmentation (dict(str, any) or np.ndarray): The mask. If
                output_mode='binary_mask', is an array of shape HW. Otherwise,
                is a dictionary containing the RLE.
              bbox (list(float)): The box around the mask, in XYWH format.
              area (int): The area in pixels of the mask.
              predicted_iou (float): The model's own prediction of the mask's
                quality. This is filtered by the pred_iou_thresh parameter.
              point_coords (list(list(float))): The point coordinates input
                to the model to generate this mask.
              stability_score (float): A measure of the mask's quality. This
                is filtered on using the stability_score_thresh parameter.
              crop_box (list(float)): The crop of the image used to generate
                the mask, given in XYWH format.
        """

        # Generate masks
        mask_data = self._generate_masks(image)

        # Filter small disconnected regions and holes in masks
        if self.min_mask_region_area > 0:
            mask_data = self.postprocess_small_regions(
                mask_data,
                self.min_mask_region_area,
                max(self.box_nms_thresh, self.crop_nms_thresh),
            )

        # Encode masks
        if self.output_mode == "coco_rle":
            mask_data["segmentations"] = [
                coco_encode_rle(rle) for rle in mask_data["rles"]
            ]
        elif self.output_mode == "binary_mask":
            mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
        else:
            mask_data["segmentations"] = mask_data["rles"]

        # Write mask records
        curr_anns = []
        for idx in range(len(mask_data["segmentations"])):
            ann = {
                "segmentation": mask_data["segmentations"][idx],
                "area": area_from_rle(mask_data["rles"][idx]),
                "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
                "predicted_iou": mask_data["iou_preds"][idx].item(),
                "point_coords": [mask_data["points"][idx].tolist()],
                "stability_score": mask_data["stability_score"][idx].item(),
                "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
            }
            curr_anns.append(ann)

        return curr_anns

    def _generate_masks(self, image: np.ndarray) -> MaskData:
        orig_size = image.shape[:2]
        crop_boxes, layer_idxs = generate_crop_boxes(
            orig_size, self.crop_n_layers, self.crop_overlap_ratio
        )

        # Iterate over image crops
        data = MaskData()
        for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
            crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
            data.cat(crop_data)

        # Remove duplicate masks between crops
        if len(crop_boxes) > 1:
            # Prefer masks from smaller crops
            scores = 1 / box_area(data["crop_boxes"])
            scores = scores.to(data["boxes"].device)
            keep_by_nms = batched_nms(
                data["boxes"].float(),
                scores,
                torch.zeros_like(data["boxes"][:, 0]),  # categories
                iou_threshold=self.crop_nms_thresh,
            )
            data.filter(keep_by_nms)

        data.to_numpy()
        return data

    def _process_crop(
        self,
        image: np.ndarray,
        crop_box: List[int],
        crop_layer_idx: int,
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        # Crop the image and calculate embeddings
        x0, y0, x1, y1 = crop_box
        cropped_im = image[y0:y1, x0:x1, :]
        cropped_im_size = cropped_im.shape[:2]
        self.predictor.set_image(cropped_im)

        # Get points for this crop
        points_scale = np.array(cropped_im_size)[None, ::-1]
        points_for_image = self.point_grids[crop_layer_idx] * points_scale

        # Generate masks for this crop in batches
        data = MaskData()
        for (points,) in batch_iterator(self.points_per_batch, points_for_image):
            batch_data = self._process_batch(
                points, cropped_im_size, crop_box, orig_size
            )
            data.cat(batch_data)
            del batch_data
        self.predictor.reset_image()

        # Remove duplicates within this crop.
        keep_by_nms = batched_nms(
            data["boxes"].float(),
            data["iou_preds"],
            torch.zeros_like(data["boxes"][:, 0]),  # categories
            iou_threshold=self.box_nms_thresh,
        )
        data.filter(keep_by_nms)

        # Return to the original image frame
        data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
        data["points"] = uncrop_points(data["points"], crop_box)
        data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])

        return data

    def _process_batch(
        self,
        points: np.ndarray,
        im_size: Tuple[int, ...],
        crop_box: List[int],
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        orig_h, orig_w = orig_size

        # Run model on this batch
        transformed_points = self.predictor.transform.apply_coords(points, im_size)
        in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
        in_labels = torch.ones(
            in_points.shape[0], dtype=torch.int, device=in_points.device
        )
        masks, iou_preds, _ = self.predictor.predict_torch(
            in_points[:, None, :],
            in_labels[:, None],
            multimask_output=True,
            return_logits=True,
        )

        # Serialize predictions and store in MaskData
        data = MaskData(
            masks=masks.flatten(0, 1),
            iou_preds=iou_preds.flatten(0, 1),
            points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
        )
        del masks

        # Filter by predicted IoU
        if self.pred_iou_thresh > 0.0:
            keep_mask = data["iou_preds"] > self.pred_iou_thresh
            data.filter(keep_mask)

        # Calculate stability score
data["stability_score"] = calculate_stability_score(
8
2023-11-26 23:42:53+00:00
16k
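For context on what this record's completion target computes: next_line calls calculate_stability_score, the context snippet at gold_snippet_index 8 above, which scores a mask as the IoU between its high- and low-threshold binarizations. Below is a minimal, self-contained sketch of that computation on dummy data; the function name stability_score_sketch and the plain float accumulation are illustrative simplifications, not the library's API.

import torch

def stability_score_sketch(mask_logits: torch.Tensor, mask_threshold: float, offset: float) -> torch.Tensor:
    # IoU between the binary masks obtained by thresholding the logits high vs. low.
    # The high-threshold mask is contained in the low-threshold mask, so the
    # intersection is the high count and the union is the low count.
    intersections = (mask_logits > (mask_threshold + offset)).sum(-1).sum(-1).float()
    unions = (mask_logits > (mask_threshold - offset)).sum(-1).sum(-1).float()
    return intersections / unions

logits = torch.randn(2, 64, 64)                  # two dummy low-res mask logit maps
print(stability_score_sketch(logits, 0.0, 1.0))  # one score per mask, higher = more stable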
sophiaalthammer/alforrankers
matchmaker/utils/input_pipeline.py
[ { "identifier": "ConditionalQueryGenerationInferenceReader", "path": "matchmaker/dataloaders/query_generation_inference_loader.py", "snippet": "class ConditionalQueryGenerationInferenceReader(DatasetReader):\n \"\"\"\n Read a tsv file containing a passage collection.\n \n Expected format for each input line: <doc_id>\\t<doc_sequence_string>\n The output of ``read`` is a list of ``Instance`` s with the fields:\n doc_tokens: ``TextField`` \n target_query_type: ``MetadataField``\n target_query_length: ``MetadataField``\n\n\n Parameters\n ----------\n tokenizer : ``Tokenizer``, optional\n Tokenizer to use to split the input sequences into words or other kinds of tokens. \n token_indexers : ``Dict[str, TokenIndexer]``, optional\n Indexers used to define input (source side) token representations. Defaults to\n ``{\"tokens\": SingleIdTokenIndexer()}``.\n \"\"\"\n def __init__(self,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n \n max_doc_length:int = -1,\n max_query_length:int = -1,\n\n target_distribution_file:str = None,\n target_number_of_queries_total:int = 1 # ATTENTION, this is per worker!! (divide on your own if using > 1 worker)\n ):\n\n super().__init__(\n manual_distributed_sharding=True,\n manual_multiprocess_sharding=True\n )\n self._tokenizer = tokenizer\n self._token_indexers = token_indexers\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n self.target_number_of_queries_total = target_number_of_queries_total\n\n target_distribution,(target_label_types,target_label_lengths) = approximate_target_distribution_from_file(target_distribution_file)\n\n console = Console()\n\n console.log(\"[QueryGenLoader] Targeting distribution:\",target_distribution*target_number_of_queries_total,\", labels\",(target_label_types,target_label_lengths))\n\n self.target_distribution = target_distribution\n self.target_label_types = target_label_types\n self.target_label_lengths = target_label_lengths\n\n @overrides\n def _read(self, file_path):\n with open(cached_path(file_path), \"r\", encoding=\"utf8\") as data_file:\n #logger.info(\"Reading instances from lines in file at: %s\", file_path)\n for i,line in enumerate(self.shard_iterable(data_file)):\n if i == self.target_number_of_queries_total:\n break\n\n line = line.strip()\n\n if not line:\n continue\n\n line_parts = line.split('\\t')\n if len(line_parts) == 2:\n doc_id, doc_sequence = line_parts\n else:\n raise ConfigurationError(\"Invalid line format: %s\" % (line))\n\n yield self.text_to_instance(doc_id, doc_sequence)\n\n @overrides\n def text_to_instance(self, doc_id:str, doc_sequence: str) -> Instance:\n\n doc_id_field = MetadataField(doc_id)\n\n target_idx = np.random.choice(len(self.target_distribution),1,replace=False,p=self.target_distribution)[0]\n\n concat_sequence = (\":query_group\"+str(self.target_label_types[target_idx]) + \" \"+ str(self.target_label_lengths[target_idx]) + \" \" + doc_sequence)\n\n doc_tokenized = self._tokenizer.tokenize(concat_sequence, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n doc_field = TransformerTextField(**doc_tokenized,padding_token_id=self._tokenizer._tokenizer.pad_token_id)\n\n return Instance({\n \"doc_id\":doc_id_field,\n \"doc_tokens\":doc_field,\n \"target_query_type\":MetadataField(self.target_label_types[target_idx]),\n \"target_query_length\":MetadataField(self.target_label_lengths[target_idx])})" }, { "identifier": "PseudoLabelDatasetLoader", 
"path": "matchmaker/dataloaders/pseudo_label_training_loader.py", "snippet": "class PseudoLabelDatasetLoader():\n \"\"\"\n \n \"\"\"\n\n def __init__(\n self,\n\n query_file: str,\n collection_file: str,\n rankings_with_teacher_scores: str,\n\n selection_type: str, # values: \"scores\", \"scores-non-fixed\", \"top-rank\"\n min_pos_score: float,\n max_diff_to_be_pos: float,\n min_diff_to_neg: float,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences = False,\n random_seed=42,\n ):\n\n self.query_file = query_file\n self.collection_file = collection_file\n self.rankings_with_teacher_scores = rankings_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.selection_type = selection_type\n self.min_pos_score = min_pos_score\n self.max_diff_to_be_pos = max_diff_to_be_pos\n self.min_diff_to_neg = min_diff_to_neg\n\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n self.uniqe_pos_only = False\n\n def __iter__(self) -> Iterator[TensorDict]:\n \n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[PseudoLabel] Loading rankings from:\",self.rankings_with_teacher_scores)\n self.pos_by_qid = defaultdict(list)\n self.neg_by_qid = defaultdict(list)\n\n stat_total_pos = 0\n stat_total_neg = 0\n with open(self.rankings_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n current_q_id = \"\"\n current_top_score = 0\n for line in qf:\n ls = line.split() # pos_score<t>neg_score<t>pos_id<t>neg_id\n if current_q_id != ls[0]:\n current_q_id = ls[0]\n current_top_score = float(ls[3])\n if self.selection_type == \"scores\" or self.selection_type == \"scores-non-fixed\":\n if current_top_score >= self.min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],float(ls[3])))\n stat_total_pos+=1\n\n elif self.selection_type == \"top-rank\": \n self.pos_by_qid[ls[0]].append((ls[1],float(ls[3])))\n stat_total_pos+=1\n else:\n score = float(ls[3])\n if self.selection_type == \"scores\":\n if score >= current_top_score - self.max_diff_to_be_pos and score >= self.min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n elif self.selection_type == \"scores-non-fixed\":\n if score >= current_top_score - self.max_diff_to_be_pos: # TODO apply this fix and score >= min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n 
self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n elif self.selection_type == \"top-rank\": \n if score >= current_top_score - self.max_diff_to_be_pos:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n\n console.log(\"[PseudoLabel] Loading collection from:\",self.collection_file)\n self.collection = {}\n self.collection_ids = []\n with open(self.collection_file, \"r\", encoding=\"utf8\") as cf:\n for line in cf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.collection[ls[0]] = ls[1].rstrip()[:100_000]\n self.collection_ids.append(ls[0])\n\n console.log(\"[PseudoLabel] Loading queries from:\",self.query_file)\n self.queries = {}\n with open(self.query_file, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.queries[ls[0]] = ls[1].rstrip()\n\n self.query_ids = np.array(sorted(list(set(self.pos_by_qid.keys()).intersection(set(self.neg_by_qid.keys())))))\n\n console.log(f\"[PseudoLabel] Done loading! Using {stat_total_pos} positives and {stat_total_neg} negatives for {len(self.query_ids)} queries\")\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n \n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while len(self.query_ids) > query_target_count:\n\n main_instances = []\n\n #while len(main_instances) < self.batch_size:\n\n #q_ids = random.sample(self.query_ids, query_target_count)\n q_id_idxs = random.sample(range(len(self.query_ids)), query_target_count)\n \n query_idx_remove_buffer = [] # only used for self.uniqe_pos_only==True, we need to buffer the removals, \n # otherwise we break the for loop access of already drawn q_ids\n\n for q_idx in q_id_idxs:\n q_id = self.query_ids[q_idx]\n\n #if q_id not in self.pos_by_qid or q_id not in self.neg_by_qid: # need to make sure that we did not just remove the query from the dataset (only for self.uniqe_pos_only==True)\n # continue\n\n pos = random.choice(self.pos_by_qid[q_id])\n neg = random.choice(self.neg_by_qid[q_id])\n\n if self.uniqe_pos_only:\n self.pos_by_qid[q_id].remove(pos) # ok to remove here, because q_id is unique in this for loop\n if len(self.pos_by_qid[q_id]) == 0:\n #del self.pos_by_qid[q_id]\n query_idx_remove_buffer.append(q_idx)\n #self.query_ids.pop(q_idx)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[pos[0]],self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[neg[0]],self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(self.queries[q_id]),\n \"doc_pos_tokens\": self.get_tokenized_document(self.collection[pos[0]]),\n \"doc_neg_tokens\": self.get_tokenized_document(self.collection[neg[0]]),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos[1]))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg[1]))\n\n main_instances.append(Instance(ret_instance))\n\n #if len(main_instances) == self.batch_size:\n # break\n if self.uniqe_pos_only:\n if len(query_idx_remove_buffer) > 0:\n self.query_ids = 
np.delete(self.query_ids,query_idx_remove_buffer)\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch,None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "PseudoLabelTextDatasetLoader", "path": "matchmaker/dataloaders/pseudo_label_training_loader.py", "snippet": "class PseudoLabelTextDatasetLoader():\n \"\"\"\n\n \"\"\"\n\n def __init__(\n self,\n\n rankings_with_teacher_scores: str,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences=False,\n random_seed=42,\n ):\n\n self.rankings_with_teacher_scores = rankings_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n self.uniqe_pos_only = False\n\n def __iter__(self) -> Iterator[TensorDict]:\n\n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[PseudoLabel] Loading rankings from:\", self.rankings_with_teacher_scores)\n\n self.triples = [] # query_id pos_id neg_id pos_score neg_score\n\n with open(self.rankings_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split('\\t') # pos_score neg_score query_text pos_text neg_text\n self.triples.append((float(ls[0]), float(ls[1]), ls[2], ls[3], ls[4]))\n\n console.log(f\"[TripleId] Done loading! 
Using {len(self.triples)} triples\")\n\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n\n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while True:\n\n main_instances = []\n\n while len(main_instances) < self.batch_size:\n\n pos_score, neg_score, q_text, pos_text, neg_text = random.choice(self.triples)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(q_text, pos_text,\n self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(q_text, neg_text,\n self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(q_text),\n \"doc_pos_tokens\": self.get_tokenized_document(pos_text),\n \"doc_neg_tokens\": self.get_tokenized_document(neg_text),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos_score))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg_score))\n\n main_instances.append(Instance(ret_instance))\n\n if len(main_instances) == self.batch_size:\n break\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch, None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n\n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "TripleIdDatasetLoader", "path": "matchmaker/dataloaders/triple_id_training_loader.py", "snippet": "class TripleIdDatasetLoader():\n \"\"\"\n \n \"\"\"\n\n def __init__(\n self,\n\n query_file: str,\n collection_file: str,\n triples_with_teacher_scores: str,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences = False,\n random_seed=42,\n ):\n\n self.query_file = query_file\n self.collection_file = collection_file\n self.triples_with_teacher_scores = triples_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n def __iter__(self) -> Iterator[TensorDict]:\n \n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, 
tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[TripleId] Loading rankings from:\",self.triples_with_teacher_scores)\n self.triples = [] # query_id pos_id neg_id pos_score neg_score\n\n with open(self.triples_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split() # pos_score neg_score query_id pos_id neg_id\n self.triples.append((ls[2],ls[3],ls[4],float(ls[0]),float(ls[1])))\n\n console.log(\"[TripleId] Loading collection from:\",self.collection_file)\n self.collection = {}\n self.collection_ids = []\n with open(self.collection_file, \"r\", encoding=\"utf8\") as cf:\n for line in cf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.collection[ls[0]] = ls[1].rstrip()[:100_000]\n self.collection_ids.append(ls[0])\n\n console.log(\"[TripleId] Loading queries from:\",self.query_file)\n self.queries = {}\n with open(self.query_file, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.queries[ls[0]] = ls[1].rstrip()\n\n console.log(f\"[TripleId] Done loading! Using {len(self.triples)} triples\")\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n \n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while True:\n\n main_instances = []\n\n while len(main_instances) < self.batch_size:\n\n q_id,pos_id,neg_id,pos_score,neg_score = random.choice(self.triples)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[pos_id],self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[neg_id],self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(self.queries[q_id]),\n \"doc_pos_tokens\": self.get_tokenized_document(self.collection[pos_id]),\n \"doc_neg_tokens\": self.get_tokenized_document(self.collection[neg_id]),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos_score))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg_score))\n\n main_instances.append(Instance(ret_instance))\n\n if len(main_instances) == self.batch_size:\n break\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch,None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "BlingFireTokenizer", 
"path": "matchmaker/dataloaders/bling_fire_tokenizer.py", "snippet": "class BlingFireTokenizer():\n \"\"\"\n basic tokenizer using bling fire library\n \"\"\"\n\n def tokenize(self, sentence: str) -> List[Token]:\n return [Token(t) for t in text_to_words(sentence).split()]" }, { "identifier": "FastTransformerTokenizer", "path": "matchmaker/dataloaders/transformer_tokenizer.py", "snippet": "class FastTransformerTokenizer():\n \"\"\"\n basic wrapper for an HuggingFace AutoTokenizer\n \"\"\"\n\n def __init__(self, model,add_unique_ids=False,uniqueness_type=\"lower\",create_global_id=False):\n\n if \"t5\" in model:\n self._tokenizer = T5Tokenizer.from_pretrained(model)\n # when generating, we will use the logits of right-most token to predict the next token\n # so the padding should be on the left\n self._tokenizer.padding_side = \"left\"\n self._tokenizer.pad_token = self._tokenizer.eos_token # to avoid an error\n elif \"bart\" in model:\n self._tokenizer = BartTokenizer.from_pretrained(model)\n else:\n self._tokenizer = AutoTokenizer.from_pretrained(model)\n\n self.add_unique_ids = add_unique_ids\n if self.add_unique_ids:\n self.pre_tokenzier = BertPreTokenizer()\n\n from nltk.stem.porter import PorterStemmer\n self.stemmer = PorterStemmer()\n \n self.uniqueness_type = uniqueness_type # or \"stemmed\"\n self.create_global_id = create_global_id\n\n self.stem_cache = {}\n\n def tokenize(self, sentence: str, sentence2: str = None, max_length: int = 512, padding=False, random_spans=False):\n if sentence2 != None:\n seq_tokenized = self._tokenizer(sentence, sentence2,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n\n else:\n if random_spans:\n sentences = nltk.sent_tokenize(sentence)\n sentence_ids = list(range(len(sentences)))\n random.shuffle(sentence_ids)\n sent_length = 0\n sentence = ''\n for id in sentence_ids:\n sent = sentences[id]\n if len(sent.split(' ')) + sent_length < 512:\n sentence = sentence + sent\n sent_length = len(sent.split(' '))\n\n seq_tokenized = self._tokenizer(sentence,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n else:\n seq_tokenized = self._tokenizer(sentence,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n\n #\n # only used for ColBERTer model\n #\n if self.add_unique_ids:\n\n seq_tokenized.data[\"unique_input_ids\"] = torch.unique(seq_tokenized.data[\"input_ids\"])\n \n # these are the wordpiece-subwords\n tf_offsets = seq_tokenized.encodings[0].offsets\n\n # these are the whole-word offsets (subwords are not split yet), but it uses the exact same splitting mechanism\n whole_word_offsets = self.pre_tokenzier.pre_tokenize_str(sentence)\n\n # create unique_token_dict\n whole_word_unique = {}\n for i,(tok,offsets) in enumerate(whole_word_offsets):\n if self.uniqueness_type == \"stemmed\":\n lower_tok = tok.lower()\n if lower_tok not in self.stem_cache:\n tok_transformed = self.stemmer.stem(lower_tok)\n self.stem_cache[lower_tok] = tok_transformed\n else:\n tok_transformed = self.stem_cache[lower_tok]\n else:\n tok_transformed = tok.lower()\n\n whole_word_offsets[i] = (tok_transformed,offsets)\n \n if tok_transformed not in whole_word_unique:\n if self.create_global_id:\n hashed = int.from_bytes(hashlib.sha256(tok_transformed.encode('utf-8')).digest()[:4], 'little', 
signed=False) # 32-bit int\n # 0 is a reserved id for padding, don't think this will happen often though\n if hashed == 0:\n hashed = 1\n \n if hashed < 0 or hashed > 4294967295:\n #if hashed < -2147483648 or hashed > 2147483647:\n print(\"Warning: hash value is too large, will be truncated to 32-bit int\")\n whole_word_unique[tok_transformed] = hashed\n else:\n whole_word_unique[tok_transformed] = len(whole_word_unique) + 1\n\n # map tf_offsets to whole_word_unique\n tf_input_ids_to_whole_word_unique_map = torch.zeros_like(seq_tokenized.data[\"input_ids\"])\n for i,tf_offset in enumerate(tf_offsets[1:-1]): # ignore special tokens\n for whole_word_token,whole_word_offset in whole_word_offsets:\n if tf_offset[0] >= whole_word_offset[0] and tf_offset[1] <= whole_word_offset[1]:\n tf_input_ids_to_whole_word_unique_map[0][i+1] = whole_word_unique[whole_word_token]\n break\n \n # if the tokenizer cuts off the sequence, we might have some tokens that are in the pre-tokenizer, but not mapped\n # because they only appear in the end and where cut -> in this case we just remove them also from the unique list\n # as the main tokenizer is the main anchor point\n skipped_whole_word =[]\n for tok,i in whole_word_unique.items():\n if i not in tf_input_ids_to_whole_word_unique_map[0]:\n skipped_whole_word.append(tok)\n for tok in skipped_whole_word:\n del whole_word_unique[tok]\n\n #\n # this is just sanity checking to make sure that the mapping is correct\n #\n #if (tf_input_ids_to_whole_word_unique_map[0][1:-1] == 0).any():\n # missing_ids = seq_tokenized.data[\"input_ids\"][0][1:-1][tf_input_ids_to_whole_word_unique_map[0][1:-1] == 0]\n # missing_toks = self._tokenizer.convert_ids_to_tokens(missing_ids)\n # if not (len(set(missing_toks)) <= 2 and ((set(missing_toks) == set([\"[PAD]\", \"[SEP]\"])) or missing_toks[0] == \"[PAD]\")):\n # print(\"WARNING: some tokens were not found in the whole_word dictionary\",missing_toks,\"in sentence:\", sentence, \"with offset:\", whole_word_offsets,\"unique_words\", whole_word_unique)\n\n seq_tokenized.data[\"input_ids_to_words_map\"] = tf_input_ids_to_whole_word_unique_map\n seq_tokenized.data[\"unique_words\"] = torch.from_numpy(numpy.array(list(whole_word_unique.values()),dtype=numpy.int64)).unsqueeze(0)\n\n for _, d in seq_tokenized.data.items():\n d.squeeze_(0)\n return seq_tokenized.data" }, { "identifier": "PretrainedBertIndexerNoSpecialTokens", "path": "matchmaker/modules/bert_embedding_token_embedder.py", "snippet": "class PretrainedBertIndexerNoSpecialTokens(PretrainedTransformerIndexer):\n\n \"\"\"\n A ``TokenIndexer`` corresponding to a pretrained BERT model.\n Parameters\n ----------\n pretrained_model: ``str``\n Either the name of the pretrained model to use (e.g. 'bert-base-uncased'),\n or the path to the .txt file with its vocabulary.\n If the name is a key in the list of pretrained models at\n https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33\n the corresponding path will be used; otherwise it will be interpreted as a path or URL.\n use_starting_offsets: bool, optional (default: False)\n By default, the \"offsets\" created by the token indexer correspond to the\n last wordpiece in each word. 
If ``use_starting_offsets`` is specified,\n they will instead correspond to the first wordpiece in each word.\n do_lowercase: ``bool``, optional (default = True)\n Whether to lowercase the tokens before converting to wordpiece ids.\n never_lowercase: ``List[str]``, optional\n Tokens that should never be lowercased. Default is\n ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]'].\n max_pieces: int, optional (default: 512)\n The BERT embedder uses positional embeddings and so has a corresponding\n maximum length for its input ids. Any inputs longer than this will\n either be truncated (default), or be split apart and batched using a\n sliding window.\n truncate_long_sequences : ``bool``, optional (default=``True``)\n By default, long sequences will be truncated to the maximum sequence\n length. Otherwise, they will be split apart and batched using a\n sliding window.\n \"\"\"\n\n def __init__(\n self,\n pretrained_model: str,\n use_starting_offsets: bool = False,\n do_lowercase: bool = True,\n never_lowercase: List[str] = None,\n max_pieces: int = 512,\n truncate_long_sequences: bool = True,\n ) -> None:\n\n bert_tokenizer = PretrainedTransformerTokenizer(pretrained_model, do_lower_case=do_lowercase)\n super().__init__(\n vocab=bert_tokenizer.vocab,\n wordpiece_tokenizer=bert_tokenizer.wordpiece_tokenizer.tokenize,\n namespace=\"bert\",\n use_starting_offsets=use_starting_offsets,\n max_pieces=max_pieces,\n do_lowercase=do_lowercase,\n never_lowercase=never_lowercase,\n start_tokens=[],\n end_tokens=[],\n separator_token=\"[SEP]\",\n truncate_long_sequences=truncate_long_sequences,\n )\n\n def __eq__(self, other):\n if isinstance(other, PretrainedBertIndexerNoSpecialTokens):\n for key in self.__dict__:\n if key == \"wordpiece_tokenizer\":\n # This is a reference to a function in the huggingface code, which we can't\n # really modify to make this clean. So we special-case it.\n continue\n if self.__dict__[key] != other.__dict__[key]:\n return False\n return True\n return NotImplemented" } ]
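A note on the FastTransformerTokenizer snippet above: when create_global_id is enabled it derives a stable 32-bit identifier for every normalized whole word by truncating a SHA-256 digest, reserving 0 for padding. A minimal standalone sketch of that hashing rule follows; the helper name global_term_id and the example token are illustrative, not part of the repository.

import hashlib

def global_term_id(token: str) -> int:
    # Stable 32-bit id from the first 4 bytes of the SHA-256 digest; 0 stays reserved for padding.
    hashed = int.from_bytes(hashlib.sha256(token.encode("utf-8")).digest()[:4], "little", signed=False)
    if hashed == 0:
        hashed = 1  # remap the (unlikely) collision with the padding id
    return hashed

# The id is deterministic across runs and processes, unlike Python's builtin hash().
assert global_term_id("retrieval") == global_term_id("retrieval")
print(global_term_id("retrieval"))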
import torch
import numpy
import random
import torch.multiprocessing as mp
from allennlp.data.samplers import BucketBatchSampler, MaxTokensBatchSampler
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.data_loaders import MultiProcessDataLoader
from transformers import T5Tokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from matchmaker.dataloaders.concatenated_reranking_loader import *
from matchmaker.dataloaders.concatenated_training_loader import *
from matchmaker.dataloaders.independent_reranking_loader import *
from matchmaker.dataloaders.independent_training_loader import *
from matchmaker.dataloaders.id_sequence_loader import *
from matchmaker.dataloaders.mlm_masked_sequence_loader import *
from matchmaker.dataloaders.query_generation_inference_loader import ConditionalQueryGenerationInferenceReader
from matchmaker.dataloaders.tas_balanced_training_loader import *
from matchmaker.dataloaders.pseudo_label_training_loader import PseudoLabelDatasetLoader, PseudoLabelTextDatasetLoader
from matchmaker.dataloaders.triple_id_training_loader import TripleIdDatasetLoader
from transformers import AutoTokenizer
from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer
from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer
from matchmaker.modules.bert_embedding_token_embedder import PretrainedBertIndexerNoSpecialTokens
from typing import Dict, Tuple, List
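The TripleIdDatasetLoader imported above (its loader code appears in the context snippets) reads one whitespace-separated line per training triple in the order pos_score neg_score query_id pos_id neg_id and stores it re-ordered as (query_id, pos_id, neg_id, pos_score, neg_score). A small sketch of just that parsing step; the file name is a placeholder.

# Sketch only: the path is illustrative.
triples = []  # (query_id, pos_id, neg_id, pos_score, neg_score)
with open("triples_with_teacher_scores.txt", "r", encoding="utf8") as qf:
    for line in qf:
        ls = line.split()  # pos_score neg_score query_id pos_id neg_id
        triples.append((ls[2], ls[3], ls[4], float(ls[0]), float(ls[1])))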
12,133
batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def 
allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"] reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=max_length, target_distribution_file=run_config["target_distribution_file"], target_number_of_queries_total=run_config["target_number_of_queries_total"]) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def _get_indexer(model_config, max_length): # default values _tokenizer = BlingFireTokenizer() _vocab = Vocabulary() if model_config["token_embedder_type"] == "embedding": _token_indexers = {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)} _vocab = Vocabulary.from_files(model_config["vocab_directory"]) elif model_config["token_embedder_type"] == "bert_embedding" or model_config["token_embedder_type"] == "bert_vectors": _tokenizer = PretrainedTransformerTokenizer(model_config["bert_pretrained_model"], do_lowercase=True, start_tokens=[], end_tokens=[])
#from tokenizers import ByteLevelBPETokenizer,CharBPETokenizer #from matchmaker.dataloaders.transformer_tokenizer import CustomTransformerTokenizer,CustomTransformerIndexer mp.set_sharing_strategy("file_system") # VERY MUCH needed for linux !! makes everything faster, but tends to break stuff def allennlp_single_sequence_loader(model_config, run_config, _input_file, sequence_type, force_exact_batch_size=False): ''' Load examples from a .tsv file in the single sequence format: id<tab>text (Using allennlp's v2 multiprocess loader) ''' if model_config.get("model_input_type", "") == "mlm": sequence_type == "single_mlm" if sequence_type == "query": max_length = run_config.get("overwrite_max_query_length", model_config["max_query_length"]) min_length = model_config.get("min_query_length",-1) batch_size = run_config["query_batch_size"] split_document=False split_document_window_size=-1 if sequence_type == "single_mlm": max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length", -1) batch_size = run_config.get("collection_batch_size", run_config["batch_size_train"]) make_multiple_of=run_config.get("make_multiple_of",8) mask_probability=run_config.get("mask_probability",0.1) mlm_mask_replace_probability=run_config.get("mlm_mask_replace_probability",0.5) mlm_mask_random_probability=run_config.get("mlm_mask_random_probability",0.5) else: # doc max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length",-1) batch_size = run_config["collection_batch_size"] split_document=run_config.get("split_document",False) split_document_window_size=run_config.get("split_document_window_size",-1) _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max_length) #if model_config.get("model_input_type", "") == "mlm": # reader = MLMMaskedSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=max_length, min_doc_length=min_length, # mask_probability=mask_probability, # mlm_mask_replace_probability=mlm_mask_replace_probability, # mlm_mask_random_probability=mlm_mask_random_probability, # make_multiple_of=make_multiple_of) reader = IdSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, split_document=split_document,split_document_window_size=split_document_window_size, max_seq_length=max_length, min_seq_length=min_length, sequence_type=sequence_type) if force_exact_batch_size: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=int(batch_size)) else: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["seq_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_triple_training_loader(model_config, run_config, _input_file,add_text_to_batch=False): ''' Load training examples (either in the re-ranking text file format or a dynamic loader) (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], 
run_config["max_query_length"])) if run_config.get("dynamic_sampler", False) == False: if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) else: reader = IndependentTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], query_augment_mask_number=run_config["query_augment_mask_number"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) else: #if run_config["dynamic_sampler_type"] == "list": # loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, 
max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, 
num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"] reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=max_length, target_distribution_file=run_config["target_distribution_file"], target_number_of_queries_total=run_config["target_number_of_queries_total"]) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def _get_indexer(model_config, max_length): # default values _tokenizer = BlingFireTokenizer() _vocab = Vocabulary() if model_config["token_embedder_type"] == "embedding": _token_indexers = {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)} _vocab = Vocabulary.from_files(model_config["vocab_directory"]) elif model_config["token_embedder_type"] == "bert_embedding" or model_config["token_embedder_type"] == "bert_vectors": _tokenizer = PretrainedTransformerTokenizer(model_config["bert_pretrained_model"], do_lowercase=True, start_tokens=[], end_tokens=[])
_ind = PretrainedBertIndexerNoSpecialTokens(pretrained_model=model_config["bert_pretrained_model"], do_lowercase=True, max_pieces=max_length)
6
2023-11-21 10:38:22+00:00
16k
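A recurring pattern in the loaders of this record is token-budget batching: MaxTokensBatchSampler is given max_tokens = batch_size * max_length, so short sequences pack into larger batches while long sequences yield smaller ones. The arithmetic below only illustrates that trade-off; the numbers are made up.

batch_size_eval = 32                              # configured per-batch item count
max_doc_length = 512                              # configured sequence cap
token_budget = batch_size_eval * max_doc_length   # 16384 tokens per batch

for seq_len in (128, 256, 512):
    print(f"{seq_len} tokens/sequence -> about {token_budget // seq_len} sequences/batch")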
MICLab-Unicamp/medpseg
medpseg/poly_pipeline.py
[ { "identifier": "PolySeg2DModule", "path": "medpseg/poly_seg_2d_module.py", "snippet": "class PolySeg2DModule(pl.LightningModule):\n '''\n Regarding of the name, also works with 3D networks\n '''\n def __init__(self, hparams):\n '''\n Check starter.py for description of all hparams\n '''\n super().__init__()\n self.save_hyperparameters(hparams)\n\n ####### Hyperparameters used during development, ignore this its confusing #######\n self.pretraining = self.hparams.pretraining\n self.findings_only = getattr(self.hparams, \"findings_only\", False)\n self.weight_decay = getattr(self.hparams, \"weight_decay\", None)\n self.scheduling_factor = getattr(self.hparams, \"scheduling_factor\", None)\n self.scheduling = getattr(self.hparams, \"scheduling\", \"step\")\n self.scratch = getattr(self.hparams, \"scratch\", False)\n self.expand_bifpn = getattr(self.hparams, \"expand_bifpn\", \"conv\")\n self.backbone = getattr(self.hparams, \"backbone\", \"effnet\")\n self.val_3d = getattr(self.hparams, \"val_3d\", False)\n self.gdl = getattr(self.hparams, \"gdl\", False)\n self.bdl = getattr(self.hparams, \"bdl\", False)\n self.focal = getattr(self.hparams, \"focal\", False)\n self.atmbranch = getattr(self.hparams, \"atmbranch\", None)\n self.vesselbranch = getattr(self.hparams, \"vesselbranch\", None)\n self.recbranch = getattr(self.hparams, \"recbranch\", None)\n self.include_bg = getattr(self.hparams, \"include_background\", False)\n self.unet = getattr(self.hparams, \"unet\", False)\n self.unettr = getattr(self.hparams, \"unettr\", False)\n self.poly_level = getattr(self.hparams, \"poly_level\", None)\n self.flag_3d_metric = '_3d' if self.val_3d or self.unettr else ''\n self.excluded_average_metric_keys = [\"volume_similarity\", \"avg_hd\", \"hd\"]\n self.downstream_method = getattr(self.hparams, \"downstream_method\", None)\n self.perceptual_loss = getattr(self.hparams, \"perceptual_loss\", False)\n self.stem_replacement = getattr(self.hparams, \"stem_replacement\", False)\n self.new_latent_space = getattr(self.hparams, \"new_latent_space\", False)\n self.compound_coef = getattr(self.hparams, \"compound_coef\", 4)\n self.consistency = getattr(self.hparams, \"consistency\", False)\n self.imnet_norm = getattr(self.hparams, \"imnet_norm\", False)\n self.learnable_norm = getattr(self.hparams, \"learnable_norm\", False)\n self.circulatory_branch = getattr(self.hparams, \"circulatory_branch\", None)\n self.bifpn_channels = getattr(self.hparams, \"bifpn_channels\", 128)\n self.combined_loss = getattr(self.hparams, \"combined_loss\", False)\n self.sam = getattr(self.hparams, \"sam\", False)\n self.freeze_encoder = getattr(self.hparams, \"freeze_encoder\", False)\n self.batchfy_e2d = getattr(self.hparams, \"batchfy_e2d\", False)\n self.circulatory_regularization = getattr(self.hparams, \"circulatory_regularization\", False)\n self.medseg3d = getattr(self.hparams, \"medseg3d\", False)\n self.fpn_c = getattr(self.hparams, \"fpn_c\", None)\n # Post ATS ideas\n self.soft_circulatory = getattr(self.hparams, \"soft_circulatory\", False)\n self.poi_loss = getattr(self.hparams, \"poi_loss\", False)\n self.nrdice_loss = getattr(self.hparams, \"nrdice_loss\", False)\n self.polyunet25d = getattr(self.hparams, \"polyunet25d\", False)\n self.polyunet3d = getattr(self.hparams, \"polyunet3d\", False)\n self.mccl = getattr(self.hparams, \"mccl\", False)\n self.tversky = getattr(self.hparams, \"tversky\", False)\n self.airway_ths = getattr(self.hparams, \"airway_ths\", 0.5)\n self.vessel_ths = getattr(self.hparams, 
\"vessel_ths\", 0.5)\n self.self_attention = getattr(self.hparams, \"self_attention\", False)\n self.deep_supervision = getattr(self.hparams, \"deep_supervision\", False)\n self.con_detect = getattr(self.hparams, \"con_detect\", False)\n self.celoss = getattr(self.hparams, \"celoss\", False)\n self.large = getattr(self.hparams, \"large\", False)\n self.combined_gdl = getattr(self.hparams, \"combined_gdl\", False)\n self.full_silver = getattr(self.hparams, \"preprocess\", '') == \"full_silver_poly_3levels_circulatory\"\n if self.full_silver:\n print(\"Full silver mode detected, every item on batch must be fullsilver preprocess\")\n ####### Hyperparameters used during development, ignore this its confusing #######\n\n # Determine offset for polymorphic labels depending on poly level\n # Poly level:\n # None: supervised training only\n # 0: self supervised only\n # 2: lung -> unhealthy/healthy\n # 3: unhealthy -> GGO/CON\n self.nlossterms = 0\n if self.poly_level == 3: # Previous logic for this was wrong, changing to count from beginning\n self.simple_offset = 2 # BG + Lung\n self.detailed_offset = 3 # BG + Healthy + Unhealthy\n else:\n self.simple_offset = 2 # BG + Lung\n self.detailed_offset = None # Not present if not poly_level 3\n\n # Redundant argument necessary to not tie module to data preprocessing\n if \"poly_3levels\" in self.hparams.preprocess:\n assert self.poly_level == 3 or self.poly_level == 2\n\n self.two5d = True\n self.model = MEDSeg(self.hparams.nin, self.hparams.seg_nout, apply_sigmoid=False, backbone=self.backbone, expand_bifpn=self.expand_bifpn, pretrained=not self.scratch,\n num_classes_atm=self.atmbranch, num_classes_vessel=self.vesselbranch, num_classes_rec=self.recbranch, stem_replacement=self.stem_replacement, new_latent_space=self.new_latent_space,\n compound_coef=self.compound_coef, imnet_norm=self.imnet_norm, learnable_norm=self.learnable_norm, circulatory_branch=self.circulatory_branch,\n bifpn_channels=self.bifpn_channels, sam_embedding=self.sam, self_attention=self.self_attention, deep_supervision=self.deep_supervision,\n con_detecting=self.con_detect, large=self.large, soft_circulatory=self.soft_circulatory)\n \n self.pretrained_weights = self.hparams.pretrained_weights\n if self.pretrained_weights is not None:\n print(f\"Loading pretrained weights from {self.pretrained_weights}\")\n self.model = PolySeg2DModule.load_from_checkpoint(self.pretrained_weights).model\n\n # Supervised loss\n assert (not(self.combined_loss) or not(self.nrdice_loss)) and (not(self.combined_loss) or not(self.mccl)) and (not(self.nrdice_loss) or not(self.mccl)), \"Cant do combined loss and nrdice loss or combined loss and mccl at the same time\"\n \n if self.combined_loss:\n print(\"Combined Loss\")\n self.lossfn = CombinedLoss(include_background=self.include_bg, cross_entropy=self.celoss, gdl=self.combined_gdl, soft_circulatory=self.soft_circulatory)\n self.dicer = DICEMetric(per_channel_metric=True, check_bounds=False)\n\n print('-'*100 + \n f\"\\nPoly2D Module in the following configuration:\"\n f\"\\npoly_level: {self.poly_level} soft_circulatory: {self.soft_circulatory}\"\n f\"\\nnin: {self.hparams.nin} main_nout: {self.hparams.seg_nout}, DS: {self.deep_supervision}, SA: {self.self_attention}\"\n f\"\\nMEDSeg 3D? 
{self.medseg3d}\\n\" +\n '-'*100)\n\n def save_pt_model(self, path):\n torch.save(self.model.state_dict(), path)\n\n def load_pt_model(self, path):\n self.model.load_state_dict(torch.load(path))\n\n def visual_debug(self, x, y, label):\n pass\n\n def forward(self, x, stacking=False):\n if self.val_3d and not self.training and not stacking: # either training, or bein in val_3d or stacking flag avoids this branch and...\n return real_time_stack_predict(self, x, self.hparams.eval_batch_size, extended_2d=self.hparams.extended_2d, num_workers=self.hparams.nworkers, device=torch.device(\"cpu\") if self.hparams.cpu else x.device)\n else: # ...we return direct slice activations\n y_hat = self.model(x) \n if isinstance(y_hat, dict):\n for k in y_hat.keys():\n if 'atm' in k or 'vessel' in k:\n if self.soft_circulatory:\n y_hat[k] = y_hat[k].softmax(dim=1) \n else:\n y_hat[k] = y_hat[k].sigmoid()\n elif 'main' in k:\n y_hat[k] = y_hat[k].softmax(dim=1)\n else:\n raise ValueError(f\"Unexpected key in MEDSeg return: {k}\")\n if self.hparams.debug and not stacking:\n print(\"y_hat state:\")\n for k, v in y_hat.items():\n print(f\"{k}: {v.shape}\")\n else:\n y_hat = y_hat.softmax(dim=1)\n if self.hparams.debug and not stacking:\n print(f\"y_hat state: {y_hat.shape}\")\n \n return y_hat\n\n # Main branch forms ##################################\n def simple_level(self, y_hat, y, simple, ds, do_loss):\n '''\n Where we train on lung masks only. \n '''\n if self.full_silver and self.training:\n raise RuntimeError(\"Shouldn't be running simple_level on full_silver\")\n \n if isinstance(y_hat, dict):\n lung = y_hat[\"main\"][simple, 1:].sum(dim=1, keepdim=True) # lung is everything after bg summed\n y_hat_simple = torch.cat([y_hat[\"main\"][simple, :1], lung], dim=1) # 2 channel bg + lung on simple cases\n else:\n lung = y_hat[simple, 1:].sum(dim=1, keepdim=True) # lung is everything after bg summed\n y_hat_simple = torch.cat([y_hat[simple, :1], lung], dim=1) # bg + lung on simple cases\n \n # WANING: Boundary Loss deprecated, no significant difference shown \n if self.simple_offset is None: # poly simplification removes unhealthy label\n y_simple = y[simple] \n else:\n y_simple = y[simple, :self.simple_offset] \n NS = y_simple.shape[0]\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n simple_loss = self.lossfn(y_hat_simple, y_simple)\n else:\n simple_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_simple_argmax = y_hat_simple.argmax(dim=1, keepdim=True)\n y_hat_lung = y_hat_simple_argmax == 1\n for ns in range(NS):\n struct_names = [\"lung\"]\n seg_metrics(gts=y_simple[ns, 1:2].cpu().numpy().astype(np.uint8), preds=y_hat_lung.detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for simplified level not implemented\")\n \n return simple_loss\n\n def detailed_level(self, y_hat, y, detailed, ds, do_loss):\n '''\n Where we train on Healthy/Unhealthy masks\n Still supports old 2.5D validation metrics do pretraining project\n '''\n if self.full_silver and self.training:\n raise RuntimeError(\"Shouldn't be running detailed_level on full_silver\")\n \n if isinstance(y_hat, dict): \n if self.poly_level == 3: # if we have ggo and con outputs, reduce then\n 
unhealthy = y_hat[\"main\"][detailed, 2:].sum(dim=1, keepdim=True) # GGO + CON = unhealthy\n y_hat_detailed = torch.cat([y_hat[\"main\"][detailed, :2], unhealthy], dim=1) # Concating BG, Healthy with unhealthy\n else:\n y_hat_detailed = y_hat[\"main\"][detailed]\n else:\n if self.poly_level == 3: # if we have ggo and con outputs, reduce then\n unhealthy = y_hat[detailed, 2:].sum(dim=1, keepdim=True) # GGO + CON = unhealthy\n y_hat_detailed = torch.cat([y_hat[detailed, :2], unhealthy], dim=1) # Concating BG, Healthy with unhealthy\n else:\n y_hat_detailed = y_hat[detailed]\n \n # Logic to separate concatenations on x and y. Kind of complicated\n # Although boundary loss is implemented, early experiments showed it not being signifcantly better so, deprecated.\n if self.detailed_offset is None:\n y_detailed = y[detailed]\n else:\n y_detailed = y[detailed, :self.detailed_offset] \n ND = y_detailed.shape[0]\n\n # Loss can be disabled to accelerate validation\n if do_loss:\n detailed_loss = self.lossfn(y_hat_detailed, y_detailed)\n else:\n detailed_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_detailed_argmax = y_hat_detailed.argmax(dim=1, keepdim=True)\n y_hat_detailed = torch.cat((y_hat_detailed_argmax == 1, y_hat_detailed_argmax == 2), dim=1)\n for nd in range(ND):\n struct_names = [\"healthy\", \"unhealthy\"]\n seg_metrics(gts=y_detailed[nd, 1:3].cpu().numpy().astype(np.uint8), preds=y_hat_detailed[nd, :2].detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n healthy_metric, unhealthy_metric = self.dicer(y_hat_detailed[:, 1:3], y_detailed[:, 1:3])\n self.log(\"healthy_dice\", healthy_metric, on_epoch=True, on_step=False, prog_bar=False)\n self.log(\"unhealthy_dice\", unhealthy_metric, on_epoch=True, on_step=False, prog_bar=False)\n\n return detailed_loss\n\n def separation_level(self, y_hat, y, separation, ds, do_loss):\n '''\n Where we train on separating GGO and Consolidations \n (semi-supervised through threshold + unhealthy label)\n\n One day might be manual labels too\n '''\n if isinstance(y_hat, dict):\n y_hat_separation = y_hat[\"main\"][separation][:, :4]\n else:\n y_hat_separation = y_hat[separation][:, :4]\n\n y_separation = y[separation][:, :4]\n ND = y_separation.shape[0]\n\n # Loss can be disabled to accelerate validation\n if do_loss:\n separation_loss = self.lossfn(y_hat_separation, y_separation)\n else:\n separation_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_separation_argmax = y_hat_separation.argmax(dim=1, keepdim=True)\n y_hat_separation = torch.cat((y_hat_separation_argmax == 2, y_hat_separation_argmax == 3), dim=1)\n for nd in range(ND):\n struct_names = [\"ggo\", \"con\"]\n seg_metrics(gts=y_separation[nd, 2:4].cpu().numpy().astype(np.uint8), preds=y_hat_separation[nd, :2].detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n\n return separation_loss\n ####################################################\n\n # ATM branch computations\n def atm_branch(self, y_hat, y, 
atm, ds, do_loss):\n '''\n where we optimize atm parts of the batch, binary label\n '''\n if self.full_silver and self.training:\n if self.soft_circulatory:\n bg = torch.ones_like(y[atm, 5:6]) - y[atm, 5:6]\n y_airway = torch.cat([bg, y[atm, 5:6]], dim=1)\n y_hat_airway = y_hat[\"atm\"][atm, :2] \n else:\n raise RuntimeError(\"Why are you running full_silver without SoftCirculatory\")\n else:\n if self.soft_circulatory:\n y_airway = y[atm, :2] # Taking one hot map\n y_hat_airway = y_hat[\"atm\"][atm, :2] # output has 2 channels\n else:\n y_airway = y[atm, 1:2] # 0 is BG, taking binary airway map\n y_hat_airway = y_hat[\"atm\"][atm, :1] # output has only 1 channel\n NS = y_airway.shape[0] # nsamples\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n atm_loss = self.lossfn(y_hat_airway, y_airway)\n else:\n atm_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n # Making sure to get the correct activation when softmax (soft_circulatory) is turned on.\n if self.soft_circulatory:\n # Note that this is already 0 and 1 after argmax\n binary_y_hat_airway = y_hat_airway.detach().argmax(dim=1, keepdim=True).cpu().numpy().astype(np.uint8)\n binary_y_airway = y_airway[:, 1:2].cpu().numpy().astype(np.uint8)\n else:\n # Split sigmoid on THS\n binary_y_hat_airway = (y_hat_airway.detach() > self.airway_ths).cpu().numpy().astype(np.uint8)\n binary_y_airway = y_airway[:, 0:1].cpu().numpy().astype(np.uint8)\n assert binary_y_hat_airway.shape[1] == 1 and binary_y_hat_airway.max() <= 1\n\n for ns in range(NS):\n struct_names = [\"airway\"]\n seg_metrics(gts=binary_y_airway[ns], \n preds=binary_y_hat_airway[ns],\n metrics=self.metrics, \n struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for atm not implemented\")\n \n return atm_loss\n\n # Vessel branch computations\n def vessel_branch(self, y_hat, y, vessel, ds, do_loss):\n '''\n where we optimize atm parts of the batch\n '''\n '''\n where we optimize atm parts of the batch, binary label\n '''\n if self.full_silver and self.training:\n if self.soft_circulatory:\n bg = torch.ones_like(y[vessel, 4:5]) - y[vessel, 4:5]\n y_vessel = torch.cat([bg, y[vessel, 4:5]], dim=1)\n y_hat_vessel = y_hat[\"vessel\"][vessel, :2] \n else:\n raise RuntimeError(\"Why are you running full_silver without SoftCirculatory\")\n else:\n if self.soft_circulatory:\n y_vessel = y[vessel, :2] # Taking one hot map\n y_hat_vessel = y_hat[\"vessel\"][vessel, :2] # output has 2 channels\n else:\n y_vessel = y[vessel, 1:2] # 0 is BG, taking binary airway map\n y_hat_vessel = y_hat[\"vessel\"][vessel, :1] # output has only 1 channel\n \n NS = y_vessel.shape[0] # nsamples\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n vessel_loss = self.lossfn(y_hat_vessel, y_vessel)\n else:\n vessel_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n # Making sure to get the correct activation when softmax (soft_circulatory) is turned on.\n if self.soft_circulatory:\n # Note that this is already 0 and 1 after argmax\n binary_y_hat_vessel = y_hat_vessel.detach().argmax(dim=1, keepdim=True).cpu().numpy().astype(np.uint8)\n binary_y_vessel = y_vessel[:, 1:2].cpu().numpy().astype(np.uint8)\n else:\n # Split sigmoid on THS\n binary_y_hat_vessel = 
(y_hat_vessel.detach() > self.vessel_ths).cpu().numpy().astype(np.uint8)\n binary_y_vessel = y_vessel[:, 0:1].cpu().numpy().astype(np.uint8)\n assert binary_y_hat_vessel.shape[1] == 1 and binary_y_hat_vessel.max() <= 1\n\n for ns in range(NS):\n struct_names = [\"vessel\"]\n seg_metrics(gts=binary_y_vessel[ns], \n preds=binary_y_hat_vessel[ns],\n metrics=self.metrics, \n struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for vessel not implemented\")\n \n return vessel_loss\n\n def debug_batch(self, simple, detailed, separation, atm, vessel, y, meta):\n if self.hparams.debug:\n print(f\"Training? {self.training}\")\n print(\"Simple\")\n print(simple)\n print(\"Detailed\")\n print(detailed)\n print(\"Separation\")\n print(separation)\n print(\"ATM\")\n print(atm)\n print(\"Vessel (parse)\")\n print(vessel)\n \n # Assuming B, C, ... format\n preprocess = meta[\"preprocess\"]\n import matplotlib.pyplot as plt\n for i, y_item in enumerate(y):\n item_preprocess = preprocess[i]\n print(y_item.max())\n display_buffer = y_item.cpu().argmax(dim=0).numpy()\n print(display_buffer.max())\n print(f\"Display buffer: {display_buffer.shape}\")\n if os.getenv(\"NSLOTS\") is None:\n if len(display_buffer.shape) == 3:\n pass\n else:\n plt.title(f\"Batch target {i} preprocess {item_preprocess}\")\n plt.imshow(display_buffer)\n plt.show()\n\n def deep_supervision_fn(self, \n loss_fn: Callable, \n key: str, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n index: np.ndarray, \n do_loss: bool):\n loss_acum = []\n \n for i in range(1, 5):\n current_size = (y_hat[key].shape[-2], y_hat[key].shape[-1])\n current_size = (current_size[0]//(2**(i)), current_size[1]//(2**(i)))\n \n transform = Resize(current_size, interpolation=InterpolationMode.NEAREST)\n \n # Craft prediction and target for deep supervision outputs\n new_y_hat = {}\n\n if key == \"main\":\n new_y_hat[key] = y_hat[f\"{key}{i}\"]\n elif key == \"vessel\" or key == \"atm\":\n new_y_hat[key] = y_hat[f\"{key}{i}\"]\n else:\n raise ValueError(f\"Key {key} not valid\")\n\n new_y = transform(y)\n loss = loss_fn(new_y_hat, new_y, index, True, do_loss)\n\n loss_acum.append(loss)\n\n return loss_acum\n\n def compute_loss(self, \n loss_fn: Callable, \n key: str, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n index: np.ndarray, \n do_loss: bool, \n deep_supervision: bool):\n if index.sum() > 0:\n loss = loss_fn(y_hat, y, index, False, do_loss)\n if deep_supervision and self.training:\n loss_acum = self.deep_supervision_fn(loss_fn, key, y_hat, y, index, do_loss)\n # Due to observing good results with only high resolution loss in poly, bumping high resolution weight in optimization\n # To 0.75, with rest of DS contributing to 0.25 of optimization\n loss = ((2**-1)+(2**-2))*loss + (2**-3)*loss_acum[0] + (2**-4)*loss_acum[1] + (2**-5)*loss_acum[2] + (2**-6)*loss_acum[3]\n for i in range(5):\n self.log(f\"{loss_fn.__name__}_deep_supervision_{i}\", loss if i == 0 else loss_acum[i-1], prog_bar=False, on_step=True, on_epoch=True)\n else:\n loss = 0\n\n return loss\n\n def loss_wrapper(self, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n indexes: Dict[str, np.ndarray], \n do_loss: bool, \n deep_supervision: bool):\n simple, detailed, separation, 
atm, vessel = indexes[\"simple\"], indexes[\"detailed\"], indexes[\"separation\"], indexes[\"atm\"], indexes[\"vessel\"]\n\n simple_loss = self.compute_loss(self.simple_level, \"main\", y_hat, y, simple, do_loss, deep_supervision)\n detailed_loss = self.compute_loss(self.detailed_level, \"main\", y_hat, y, detailed, do_loss, deep_supervision)\n separation_loss = self.compute_loss(self.separation_level, \"main\", y_hat, y, separation, do_loss, deep_supervision)\n atm_loss = self.compute_loss(self.atm_branch, \"atm\", y_hat, y, atm, do_loss, deep_supervision)\n vessel_loss = self.compute_loss(self.vessel_branch, \"vessel\", y_hat, y, vessel, do_loss, deep_supervision)\n\n if do_loss and simple_loss == 0 and detailed_loss == 0 and atm_loss == 0 and separation_loss == 0 and vessel_loss == 0:\n print(\">>>>>>>>>>>>>WARNING: Malformed batch, didn't find any level of polymorphism!<<<<<<<<<<<<<\")\n\n return simple_loss, detailed_loss, separation_loss, atm_loss, vessel_loss\n\n def polymorphic_loss_metrics(self, \n y: torch.Tensor, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n meta: Dict[str, List[str]], \n do_loss: bool = True):\n '''\n ####### Polymorphic training #############\n # Indexes whole batch and perform loss computations separately\n '''\n detailed = np.logical_or(np.logical_or(np.logical_or(np.logical_or(np.array(meta[\"preprocess\"]) == \"seg_raw_new\", np.array(meta[\"preprocess\"]) == \"seg_raw\"), np.array(meta[\"preprocess\"]) == \"msd_seg\"), np.array(meta[\"preprocess\"]) == \"seg_raw_new_hu\"), np.array(meta[\"preprocess\"]) == \"msd_seg_hu\") # Level 2 polymorphism, healthy/unhealthy annotation, cancer\n simple = np.logical_or(np.logical_or(np.array(meta[\"preprocess\"]) == \"pretrain_preprocessing\", np.array(meta[\"preprocess\"]) == \"classification_pretrain_preprocessing\"), np.array(meta[\"preprocess\"]) == \"pretrain_preprocessing_hu\") # Level 1 polymorphism, lung annotation\n separation = np.logical_or(np.array(meta[\"preprocess\"]) == \"separation\", np.array(meta[\"preprocess\"]) == \"manual_split_msc_hu\") # Level 3 polymorphism detect artificial con/ggo separation and correction with transform\n atm = np.logical_or(np.array(meta[\"preprocess\"]) == \"new_atm\", np.array(meta[\"preprocess\"]) == \"new_atm_hu\") # Auxiliary task, airway segmentation\n vessel = np.logical_or(np.array(meta[\"preprocess\"]) == \"parse\", np.array(meta[\"preprocess\"]) == \"parse_hu\") # Auxiliary task, vessel segmentation\n\n if self.full_silver and self.training:\n # The case where every batch item has everything, from teacher network labeling\n separation = np.array([True]*y.shape[0])\n atm = np.array([True]*y.shape[0])\n vessel = np.array([True]*y.shape[0])\n\n self.debug_batch(simple, detailed, separation, atm, vessel, y, meta)\n\n indexes = {\"simple\": simple, \"detailed\": detailed, \"separation\": separation, \"atm\": atm, \"vessel\": vessel}\n\n return self.loss_wrapper(y_hat, y, indexes, do_loss, deep_supervision=self.deep_supervision)\n\n def supervised_loss(self, y, y_hat, meta, prestr):\n '''\n Does all the dozens of losses involved in this training\n This function also computes and logs metrics internally. 
Only losses are returned to compute the final loss\n '''\n simple_loss, detailed_loss, separation_loss, atm_loss, vessel_loss = self.polymorphic_loss_metrics(y=y, y_hat=y_hat, meta=meta, do_loss=True)\n \n loss = simple_loss + detailed_loss + separation_loss + atm_loss + vessel_loss\n if loss is not None:\n if self.training:\n if simple_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}simple_loss\", simple_loss, on_step=True, on_epoch=True)\n if detailed_loss > 0: \n self.nlossterms += 1\n self.log(f\"{prestr}detailed_loss\", detailed_loss, on_step=True, on_epoch=True)\n if separation_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}separation_loss\", separation_loss, on_step=True, on_epoch=True)\n if atm_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}atm_loss\", atm_loss, on_step=True, on_epoch=True)\n if vessel_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}vessel_loss\", vessel_loss, on_step=True, on_epoch=True)\n \n self.log(f\"{prestr}loss\", loss, on_step=True, on_epoch=True)\n else:\n if simple_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}simple_loss{self.flag_3d_metric}\", simple_loss, on_step=True, on_epoch=True)\n if detailed_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}detailed_loss{self.flag_3d_metric}\", detailed_loss, on_step=True, on_epoch=True)\n if separation_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}separation_loss{self.flag_3d_metric}\", separation_loss, on_step=True, on_epoch=True)\n if atm_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}atm_loss{self.flag_3d_metric}\", atm_loss, on_step=True, on_epoch=True)\n if vessel_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}vessel_loss\", vessel_loss, on_step=True, on_epoch=True)\n \n self.log(f\"{prestr}loss{self.flag_3d_metric}\", loss, on_step=True, on_epoch=True)\n\n return loss\n\n def training_step(self, train_batch, batch_idx):\n '''\n Training step does different things if on exclusive pretraining mode or \n doing traditional supervision.\n\n We only need to return loss for optimizer, metrics are not computed\n '''\n self.nlossterms = 0\n x, y, meta = train_batch\n self.visual_debug(x, y, \"Training\")\n \n y_hat = None\n\n if self.poly_level != 0: # zero polymorphic means pretraining only\n # Traditional supervision\n if y_hat is None:\n y_hat = self.forward(x)\n\n supervised_loss = self.supervised_loss(y=y, y_hat=y_hat, meta=meta, prestr='')\n self.log(\"supervised_loss\", supervised_loss, on_step=True, on_epoch=True)\n else:\n supervised_loss = 0\n \n final_loss = supervised_loss/self.nlossterms\n self.log(\"nlossterms\", self.nlossterms, on_step=True, on_epoch=True)\n self.log(\"loss\", final_loss, on_step=True, on_epoch=True)\n\n if final_loss == 0:\n raise ValueError(\"Loss is equal to 0. Something is misconfigured.\")\n\n return final_loss # for outside optimization\n\n def validation_step(self, val_batch, batch_idx):\n '''\n Validation step does different things if on exclusive pretraining mode or \n doing traditional supervision\n\n There is no return but metrics are computed in 3D (takes a while)\n for pretraining loss is used as a validation metric. 
\n\n When using boundary loss, we are not computing it in 3D validation.\n '''\n self.nlossterms = 0\n x, y, meta = val_batch\n self.visual_debug(x, y, \"Validation\")\n \n y_hat = None\n preproc = meta[\"preprocess\"][0]\n if preproc == \"pretrain_preprocessing\" and self.val_3d:\n print(f\"Skipping no label 3D validation {preproc}\")\n return\n \n \n if self.poly_level != 0:\n # Traditional supervision\n if y_hat is None:\n y_hat = self.forward(x)\n \n # Compute loss and metrics on CPU due to val_3d memory usage\n if self.val_3d:\n if isinstance(y_hat, dict):\n for _, value in y_hat.items():\n if value.device == torch.device(\"cpu\"):\n y = y.to(value.device)\n break\n elif y_hat.device == torch.device(\"cpu\"):\n y = y.to(y_hat.device)\n \n supervised_loss = self.supervised_loss(y=y, y_hat=y_hat, meta=meta, prestr=\"val_\")\n else:\n supervised_loss = 0\n \n # We only compute validation loss when not using val_3d, since 3D validation loss is very heavy on gpu[\n if self.nlossterms != 0:\n final_loss = supervised_loss/self.nlossterms\n self.log(\"val_nlossterms\", self.nlossterms, on_step=True, on_epoch=True)\n self.log(\"val_supervised_loss\", supervised_loss, on_step=True, on_epoch=True)\n self.log(\"val_loss\", final_loss, on_step=True, on_epoch=True)\n \n def on_validation_epoch_start(self):\n '''\n Start of validation epoch tasks:\n Initialize metric dictionary and list of IDs\n '''\n # Reset metric dict\n if self.val_3d:\n self.metrics: Dict = defaultdict(lambda: defaultdict(list))\n \n def on_validation_epoch_end(self):\n '''\n End of epoch tasks:\n - Increment BDL weights\n - Print results so far in terminal (stdout) for backup logging\n '''\n if self.bdl:\n self.lossfn.increment_weights()\n\n if self.trainer.fast_dev_run or self.trainer.sanity_checking:\n print(\"Fast dev run or sanity checking detected, not logging\")\n elif not self.pretraining and self.val_3d:\n for key, value in self.metrics.items():\n print(f\"\\n{key}\")\n selected_metrics = {\"names\": [], \"values\": []}\n for metric, metric_value in value.items():\n np_metric_value = np.array(metric_value)\n mean = np_metric_value.mean() \n std = np_metric_value.std() \n print(f\"{key} {metric}: {mean}+-{std}\")\n \n # Stopped logging std for every metric, too much not very useful data on neptune\n # self.logger.experiment[f\"training/{key}_{metric}_3d_std\"].log(std)\n \n if metric not in self.excluded_average_metric_keys:\n if \"error\" in metric:\n selected_metrics[\"names\"].append(f\"1 - {metric}\")\n selected_metrics[\"values\"].append(1 - mean)\n else:\n selected_metrics[\"names\"].append(metric)\n selected_metrics[\"values\"].append(mean)\n \n np_selected_metrics = np.array(selected_metrics[\"values\"])\n np_selected_metrics_mean = np_selected_metrics.mean()\n np_selected_metrics_std = np_selected_metrics.std()\n print(f\"Building end-of-epoch composite metric:\")\n for metric, value in zip(selected_metrics[\"names\"], selected_metrics[\"values\"]):\n print(f\"{metric}: {value}\")\n print(f\"{key}_composite_metric: {np_selected_metrics_mean} +- {np_selected_metrics_std}\")\n \n self.logger.experiment[f\"training/{key}_composite_metric\"].log(np_selected_metrics_mean)\n self.logger.experiment[f\"training/{key}_composite_metric_std\"].log(np_selected_metrics_std)\n \n\n def configure_optimizers(self):\n '''\n Select optimizer and scheduling strategy according to hparams.\n '''\n opt = getattr(self.hparams, \"opt\", \"Adam\")\n optimizer = get_optimizer(opt, self.model.parameters(), self.hparams.lr, 
wd=self.weight_decay)\n print(f\"Opt: {opt}, Weight decay: {self.weight_decay}\")\n\n if self.scheduling == \"poly\":\n print(\"Polynomial LR\")\n # scheduler = PolynomialLR(optimizer, total_iters=self.hparams.max_epochs, power=0.9, verbose=True)\n elif self.scheduling == \"step\" and self.scheduling_factor is None:\n print(\"Not using any scheduler\")\n return optimizer\n elif self.scheduling_factor is not None and self.scheduling == \"step\":\n print(f\"Using step LR {self.scheduling_factor}!\")\n scheduler = StepLR(optimizer, 1, self.scheduling_factor, verbose=True)\n return [optimizer], [scheduler]\n elif self.scheduling == \"cosine\":\n print(f\"Using CosineAnnealingLR with tmax {self.scheduling_factor}!\")\n scheduler = CosineAnnealingLR(optimizer, T_max=self.scheduling_factor, verbose=True)\n return [optimizer], [scheduler]" }, { "identifier": "E2DStackDataset", "path": "medpseg/eval_2d_utils.py", "snippet": "class E2DStackDataset():\n '''\n Speed up evaluation time slice stacking with dataloader compatible dataset\n '''\n def __init__(self, volume, extended_2d):\n self.volume = volume\n self.limits = [0, volume.shape[2] - 1 ]\n self.extended_2d = extended_2d\n \n def __len__(self):\n return self.volume.shape[2]\n\n def __getitem__(self, i):\n if self.extended_2d is None:\n input_slice = self.volume[:, :, i]\n else:\n central_slice = self.volume[:, :, i]\n input_slice = []\n for extend_i in range(-self.extended_2d, self.extended_2d + 1):\n if extend_i == 0:\n input_slice.append(central_slice)\n continue\n\n new_i = i + extend_i\n if new_i > self.limits[1]:\n new_i = self.limits[1]\n if new_i < self.limits[0]:\n new_i = self.limits[0]\n \n input_slice.append(self.volume[:, :, new_i])\n input_slice = torch.cat(input_slice, dim=1)\n '''\n plt.figure(figsize=(12, 6))\n plt.subplot(1, 3, 1)\n plt.imshow(input_slice[0, 0].detach().cpu().numpy(), cmap=\"gray\")\n plt.subplot(1, 3, 2)\n plt.imshow(input_slice[0, 1].detach().cpu().numpy(), cmap=\"gray\")\n plt.subplot(1, 3, 3)\n plt.imshow(input_slice[0, 2].detach().cpu().numpy(), cmap=\"gray\")\n plt.show()\n '''\n return input_slice[0]\n\n def get_dataloader(self, batch_size, pin_memory, num_workers):\n return DataLoader(self, batch_size=batch_size, pin_memory=pin_memory, num_workers=num_workers)" }, { "identifier": "argon_cpu_count", "path": "medpseg/eval_2d_utils.py", "snippet": "def argon_cpu_count() -> int:\n if os.getenv(\"NSLOTS\") is not None:\n return int(os.getenv(\"NSLOTS\"))\n else:\n return cpu_count()" } ]
import os import torch import numpy as np import cc3d import SimpleITK as sitk from medpseg.poly_seg_2d_module import PolySeg2DModule from medpseg.eval_2d_utils import E2DStackDataset, argon_cpu_count from torch.nn import functional as F from tqdm import tqdm from collections import defaultdict from operator import itemgetter from typing import Dict, Optional from multiprocessing import Queue
10,858
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction '''
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction '''
e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count())
2
2023-11-21 20:03:33+00:00
16k
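The row ending here follows the same column layout as the rest of this dump: a list of cross-file context snippets, the target file's import block, the in-file code cropped just before the position to complete, the ground-truth next_line, and a gold_snippet_index into the context list. Below is a minimal sketch of how one such row could be turned into a next-line completion query; it is not part of the dataset itself. The dictionary keys are assumed to mirror the column names shown in this dump, and complete_next_line is a hypothetical stand-in for whatever model is under evaluation.

def build_prompt(record: dict) -> str:
    # Cross-file context: each entry carries an identifier, its source path, and a snippet.
    context_block = "\n\n".join(
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in record["context"]
    )
    # In-file context: the import block followed by the code cropped right before the target line.
    return f"{context_block}\n\n{record['import_statement']}\n\n{record['cropped_code']}\n"


def exact_match(record: dict, complete_next_line) -> bool:
    # complete_next_line(prompt) is assumed to return a single-line string continuation.
    prediction = complete_next_line(build_prompt(record))
    # Strict exact match after stripping surrounding whitespace; a real harness may normalize further.
    return prediction.strip() == record["next_line"].strip()

For the row above, build_prompt would end with the poly_stack_predict docstring from its cropped code, and the expected continuation is the E2DStackDataset dataloader line recorded in its next_line field.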
DLYuanGod/TinyGPT-V
minigpt4/datasets/builders/image_text_pair_builder.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "BaseDatasetBuilder", "path": "minigpt4/datasets/builders/base_dataset_builder.py", "snippet": "class BaseDatasetBuilder:\n train_dataset_cls, eval_dataset_cls = None, None\n\n def __init__(self, cfg=None):\n super().__init__()\n\n if cfg is None:\n # help to create datasets from default config.\n self.config = load_dataset_config(self.default_config_path())\n elif isinstance(cfg, str):\n self.config = load_dataset_config(cfg)\n else:\n # when called from task.build_dataset()\n self.config = cfg\n\n self.data_type = self.config.data_type\n\n self.vis_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n self.text_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n\n def build_datasets(self):\n # download, split, etc...\n # only called on 1 GPU/TPU in distributed\n\n if is_main_process():\n self._download_data()\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n # at this point, all the annotations and image/videos should be all downloaded to the specified locations.\n logging.info(\"Building datasets...\")\n datasets = self.build() # dataset['train'/'val'/'test']\n\n return datasets\n\n def build_processors(self):\n vis_proc_cfg = self.config.get(\"vis_processor\")\n txt_proc_cfg = self.config.get(\"text_processor\")\n\n if vis_proc_cfg is not None:\n vis_train_cfg = vis_proc_cfg.get(\"train\")\n vis_eval_cfg = vis_proc_cfg.get(\"eval\")\n\n self.vis_processors[\"train\"] = self._build_proc_from_cfg(vis_train_cfg)\n self.vis_processors[\"eval\"] = self._build_proc_from_cfg(vis_eval_cfg)\n\n if txt_proc_cfg is not None:\n txt_train_cfg = txt_proc_cfg.get(\"train\")\n txt_eval_cfg = txt_proc_cfg.get(\"eval\")\n\n self.text_processors[\"train\"] = self._build_proc_from_cfg(txt_train_cfg)\n self.text_processors[\"eval\"] = self._build_proc_from_cfg(txt_eval_cfg)\n\n @staticmethod\n def _build_proc_from_cfg(cfg):\n return (\n registry.get_processor_class(cfg.name).from_config(cfg)\n if cfg is not None\n else None\n )\n\n @classmethod\n def default_config_path(cls, type=\"default\"):\n return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])\n\n def _download_data(self):\n self._download_ann()\n self._download_vis()\n\n def _download_ann(self):\n \"\"\"\n Download annotation files if necessary.\n All the vision-language datasets should have annotations of unified format.\n\n storage_path can be:\n (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.\n (2) basename/dirname: will be suffixed with base name 
of URL if dirname is provided.\n\n Local annotation paths should be relative.\n \"\"\"\n anns = self.config.build_info.annotations\n\n splits = anns.keys()\n\n cache_root = registry.get_path(\"cache_root\")\n\n for split in splits:\n info = anns[split]\n\n urls, storage_paths = info.get(\"url\", None), info.storage\n\n if isinstance(urls, str):\n urls = [urls]\n if isinstance(storage_paths, str):\n storage_paths = [storage_paths]\n\n assert len(urls) == len(storage_paths)\n\n for url_or_filename, storage_path in zip(urls, storage_paths):\n # if storage_path is relative, make it full by prefixing with cache_root.\n if not os.path.isabs(storage_path):\n storage_path = os.path.join(cache_root, storage_path)\n\n dirname = os.path.dirname(storage_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if os.path.isfile(url_or_filename):\n src, dst = url_or_filename, storage_path\n if not os.path.exists(dst):\n shutil.copyfile(src=src, dst=dst)\n else:\n logging.info(\"Using existing file {}.\".format(dst))\n else:\n if os.path.isdir(storage_path):\n # if only dirname is provided, suffix with basename of URL.\n raise ValueError(\n \"Expecting storage_path to be a file path, got directory {}\".format(\n storage_path\n )\n )\n else:\n filename = os.path.basename(storage_path)\n\n download_url(url=url_or_filename, root=dirname, filename=filename)\n\n def _download_vis(self):\n\n storage_path = self.config.build_info.get(self.data_type).storage\n storage_path = utils.get_cache_path(storage_path)\n\n if not os.path.exists(storage_path):\n warnings.warn(\n f\"\"\"\n The specified path {storage_path} for visual inputs does not exist.\n Please provide a correct path to the visual inputs or\n refer to datasets/download_scripts/README.md for downloading instructions.\n \"\"\"\n )\n\n def build(self):\n \"\"\"\n Create by split datasets inheriting torch.utils.data.Datasets.\n\n # build() can be dataset-specific. 
Overwrite to customize.\n \"\"\"\n self.build_processors()\n\n build_info = self.config.build_info\n\n ann_info = build_info.annotations\n vis_info = build_info.get(self.data_type)\n\n datasets = dict()\n for split in ann_info.keys():\n if split not in [\"train\", \"val\", \"test\"]:\n continue\n\n is_train = split == \"train\"\n\n # processors\n vis_processor = (\n self.vis_processors[\"train\"]\n if is_train\n else self.vis_processors[\"eval\"]\n )\n text_processor = (\n self.text_processors[\"train\"]\n if is_train\n else self.text_processors[\"eval\"]\n )\n\n # annotation path\n ann_paths = ann_info.get(split).storage\n if isinstance(ann_paths, str):\n ann_paths = [ann_paths]\n\n abs_ann_paths = []\n for ann_path in ann_paths:\n if not os.path.isabs(ann_path):\n ann_path = utils.get_cache_path(ann_path)\n abs_ann_paths.append(ann_path)\n ann_paths = abs_ann_paths\n\n # visual data storage path\n vis_path = os.path.join(vis_info.storage, split)\n\n if not os.path.isabs(vis_path):\n # vis_path = os.path.join(utils.get_cache_path(), vis_path)\n vis_path = utils.get_cache_path(vis_path)\n\n if not os.path.exists(vis_path):\n warnings.warn(\"storage path {} does not exist.\".format(vis_path))\n\n # create datasets\n dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls\n datasets[split] = dataset_cls(\n vis_processor=vis_processor,\n text_processor=text_processor,\n ann_paths=ann_paths,\n vis_root=vis_path,\n )\n\n return datasets" }, { "identifier": "LaionDataset", "path": "minigpt4/datasets/datasets/laion_dataset.py", "snippet": "class LaionDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUAlignDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUAlignDataset(CaptionDataset):\n\n def __getitem__(self, index):\n\n # TODO this assumes image input, not general enough\n ann = self.annotation[index]\n\n img_file = '{}.jpg'.format(ann[\"image_id\"])\n image_path = os.path.join(self.vis_root, img_file)\n image = 
Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n caption = ann[\"caption\"]\n\n return {\n \"image\": image,\n \"answer\": caption,\n \"image_id\": self.img_ids[ann[\"image_id\"]],\n }" }, { "identifier": "TextCapDataset", "path": "minigpt4/datasets/datasets/text_caps.py", "snippet": "class TextCapDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n 'Briefly describe this image.',\n 'Provide a concise depiction of this image.',\n 'Present a short description of this image.',\n 'Summarize this image in a few words.',\n 'A short image caption:',\n 'A short image description:',\n 'A photo of ',\n 'An image that shows ',\n 'Write a short description for the image. ',\n 'Write a description for the photo.',\n 'Provide a description of what is presented in the photo.',\n 'Briefly describe the content of the image.',\n 'Can you briefly explain what you see in the image?',\n 'Could you use a few words to describe what you perceive in the photo?',\n 'Please provide a short depiction of the picture.',\n 'Using language, provide a short account of the image.',\n 'Use a few words to illustrate what is happening in the picture.',\n ]\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n\n def __len__(self):\n return len(self.ann[\"data\"])\n\n\n def __getitem__(self, index):\n info = self.ann[\"data\"][index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n caption = info[\"caption_str\"]\n caption = self.text_processor(caption)\n instruction = \"<Img><ImageHere></Img> [caption] {} \".format(random.choice(self.instruction_pool))\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": caption,\n }" }, { "identifier": "LlavaDetailDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n \n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaReasonDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaReasonDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n\n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaConversationDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.ann=[]\n\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "UnnaturalDataset", "path": "minigpt4/datasets/datasets/unnatural_instruction.py", "snippet": "class UnnaturalDataset(Dataset):\n def __init__(self, text_processor, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index][\"instances\"][0]\n instruction = info[\"instruction_with_input\"]\n constraints = info[\"constraints\"]\n answer = info[\"output\"]\n if constraints != None:\n instruction = instruction+\" \"+constraints\n\n return {\n \"instruction_input\": self.text_processor(instruction),\n \"answer\": self.text_processor(answer),\n }" }, { "identifier": "MultiTaskConversationDataset", "path": "minigpt4/datasets/datasets/multitask_conversation.py", "snippet": "class MultiTaskConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "GroundedDetailDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class GroundedDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[grounding] please describe this image in details',\n '[grounding] describe this image as detailed as possible',\n '[grounding] summarize this image in details',\n '[grounding] give a thorough description of what you see in this image',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n # image_file = 'COCO_train2014_{}.jpg'.format(info['image_id'])\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['grounded_caption']\n instruction = random.choice(self.instruction_pool)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "CaptionToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class CaptionToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"caption\"]\n answer = info[\"output\"]\n\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"CaptionToObject instruction\", instruction)\n print(\"CaptionToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "PhraseToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class PhraseToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"phrase\"]\n answer = \"<p>\"+input+\"</p> \"+info[\"bbox\"]\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"PhraseToObject instruction\", instruction)\n print(\"PhraseToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "ReferVisualGenomeDataset", "path": "minigpt4/datasets/datasets/vg_dataset.py", "snippet": "class ReferVisualGenomeDataset(Dataset):\n def __init__(self, vis_processor, text_processor, data_dir):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.data_dir = data_dir\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n all_regions = local.get_all_region_descriptions(self.data_dir)\n all_regions = [region for regions in all_regions for region in regions]\n\n # follow OFA practice, only regions smaller than 16384 pixels are used for refer\n self.regions = [region for region in all_regions if region.width * region.height < 16384]\n\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.regions)\n\n def preprocess(self, index):\n region = self.regions[index]\n image_file = region.image.url.split('/')[-2:]\n image_path = os.path.join(self.data_dir, *image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [100,100]\n\n sample_sentence = region.phrase\n refer_sentence = self.text_processor(sample_sentence)\n\n bbox = [region.x, region.y, region.width, region.height]\n\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": region.image.id,\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "ReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class ReferCOCODataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path, dataset='refcoco', splitBy='unc'):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.refer = REFER(ann_path, vis_root, dataset, splitBy)\n self.ref_ids = self.refer.getRefIds(split=\"train\")\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.ref_ids)\n\n def preprocess(self, index):\n ref_id = self.ref_ids[index]\n ref = self.refer.loadRefs(ref_id)[0]\n\n image_file = 'COCO_train2014_{:0>12}.jpg'.format(ref[\"image_id\"])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [image.shape[1], image.shape[2]]\n\n image_new_size = [100,100]\n\n sample_sentence = random.choice(ref['sentences'])['raw']\n refer_sentence = self.text_processor(sample_sentence)\n\n\n bbox = self.refer.getRefBox(ref['ref_id'])\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": ref['image_id'],\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "InvReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class InvReferCOCODataset(ReferCOCODataset):\n def __init__(self, *args, **kwargs):\n super(InvReferCOCODataset, self).__init__(*args, **kwargs)\n\n self.instruction_pool = [\n \"[identify] {}\",\n \"[identify] what object is in this location {}\",\n \"[identify] identify the object present at this location {}\",\n \"[identify] what is it in {}\",\n \"[identify] describe this object in {}\",\n \"[identify] this {} is\",\n \"[identify] the object in {} is\",\n ]\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n\n instruction = random.choice(self.instruction_pool).format(data['bbox'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n \n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['refer_sentence']),\n \"image_id\": data['image_id'],\n }" }, { "identifier": "GQADataset", "path": "minigpt4/datasets/datasets/gqa_datasets.py", "snippet": "class GQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def __getitem__(self, index):\n ann = self.annotation[index]\n\n image_path = 
os.path.join(self.vis_root, ann[\"image\"])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n instruction = random.choice(self.instruction_pool).format(question)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n answers = self.text_processor(ann[\"answer\"])\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answers,\n }" }, { "identifier": "AOKVQADataset", "path": "minigpt4/datasets/datasets/aok_vqa_datasets.py", "snippet": "class AOKVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n answer_key = \"direct_answers\"\n\n answer_weight = {}\n for answer in ann[answer_key]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[answer_key])\n else:\n answer_weight[answer] = 1 / len(ann[answer_key])\n\n answers = list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n return {\n \"image\": image,\n \"question\": question,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n question = self.text_processor(data[\"question\"])\n instruction = random.choice(self.instruction_pool).format(question)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n answer = self.text_processor(data['answer'])\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": answer,\n }" }, { "identifier": "COCOVQADataset", "path": "minigpt4/datasets/datasets/coco_vqa_datasets.py", "snippet": "class COCOVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n question_id = ann[\"question_id\"]\n\n answer_weight = {}\n for answer in ann[\"answer\"]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[\"answer\"])\n else:\n answer_weight[answer] = 1 / len(ann[\"answer\"])\n\n answers = 
list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n\n return {\n \"image\": image,\n \"question\": question,\n \"question_id\": question_id,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n instruction = random.choice(self.instruction_pool).format(data['question'])\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"question_id\": data[\"question_id\"],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['answer']),\n }" }, { "identifier": "OCRVQADataset", "path": "minigpt4/datasets/datasets/ocrvqa_dataset.py", "snippet": "class OCRVQADataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n self.data = self.create_data(ann_path)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def create_data(self, ann_path):\n processed_data = []\n with open(ann_path, 'r') as f:\n data = json.load(f)\n for k in data.keys():\n if data[k]['split'] != 1: continue # 1 for training, 2 for validation, 3 for test\n ext = os.path.splitext(data[k]['imageURL'])[1]\n imageFile = k + ext\n assert len(data[k]['questions']) == len(data[k]['answers'])\n for q, a in zip(data[k]['questions'], data[k]['answers']):\n processed_data.append(\n {'question': q,\n 'answer': a,\n 'image_path': imageFile,\n 'image_id': k,\n 'title': data[k]['title'],\n 'genre': data[k]['genre'],\n }\n )\n return processed_data\n\n def __len__(self):\n return len(self.data)" }, { "identifier": "COCOCapDataset", "path": "minigpt4/datasets/datasets/coco_caption.py", "snippet": "class COCOCapEvalDataset(CaptionEvalDataset):\nclass NoCapsEvalDataset(CaptionEvalDataset):\nclass RefCOCOEvalData(torch.utils.data.Dataset):\nclass EvalCaptionData(torch.utils.data.Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __new__(cls, *args, **kwargs):\n def __len__(self):\n def __getitem__(self, idx):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __len__(self):\n def __getitem__(self, idx):" } ]
import os import logging import warnings from minigpt4.common.registry import registry from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder from minigpt4.datasets.datasets.laion_dataset import LaionDataset from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset from minigpt4.datasets.datasets.text_caps import TextCapDataset from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset from minigpt4.datasets.datasets.gqa_datasets import GQADataset from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset from minigpt4.datasets.datasets.coco_caption import COCOCapDataset
11,982
DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} @registry.register_builder("gqa") class GQABuilder(BaseDatasetBuilder): train_dataset_cls = GQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/gqa/balanced_val.yaml", } @registry.register_builder("flickr_grounded_caption") class GroundedCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = GroundedDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_CaptionToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = CaptionToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/caption_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_ObjectToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = PhraseToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/object_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class DocumentVQABuilder(BaseDatasetBuilder): def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.image_path, ann_path=build_info.ann_path ) return datasets @registry.register_builder("ocrvqa") class OCRVQABuilder(DocumentVQABuilder):
@registry.register_builder("multitask_conversation") class MultitaskConversationBuilder(BaseDatasetBuilder): train_dataset_cls = MultiTaskConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/multitask_conversation/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("unnatural_instruction") class UnnaturalInstructionBuilder(BaseDatasetBuilder): train_dataset_cls = UnnaturalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nlp/unnatural_instruction.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( text_processor=self.text_processors["train"], ann_path=build_info.ann_path, ) return datasets @registry.register_builder("llava_detail") class LlavaDetailBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/detail.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_reason") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaReasonDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/reason.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder): train_dataset_cls = ReferVisualGenomeDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vg/ref.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info data_dir = build_info.data_dir datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], data_dir=data_dir, ) return datasets @registry.register_builder("textcaps_caption") class TextcapCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = TextCapDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/textcaps/caption.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("coco_vqa") class COCOVQABuilder(BaseDatasetBuilder): train_dataset_cls = COCOVQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco/defaults_vqa.yaml", } @registry.register_builder("ok_vqa") class OKVQABuilder(COCOVQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/okvqa/defaults.yaml", } @registry.register_builder("aok_vqa") class AOKVQABuilder(BaseDatasetBuilder): train_dataset_cls = AOKVQADataset DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} @registry.register_builder("gqa") class GQABuilder(BaseDatasetBuilder): train_dataset_cls = GQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/gqa/balanced_val.yaml", } @registry.register_builder("flickr_grounded_caption") class GroundedCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = GroundedDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_CaptionToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = CaptionToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/caption_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_ObjectToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = PhraseToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/object_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class DocumentVQABuilder(BaseDatasetBuilder): def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.image_path, ann_path=build_info.ann_path ) return datasets @registry.register_builder("ocrvqa") class OCRVQABuilder(DocumentVQABuilder):
train_dataset_cls = OCRVQADataset
20
2023-12-28 05:47:18+00:00
16k
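One more hedged sketch before the next row: for the DLYuanGod/TinyGPT-V record just above, gold_snippet_index is 20, and index 20 of its context list is the OCRVQADataset entry, which is exactly the identifier assigned on its next_line ("train_dataset_cls = OCRVQADataset"). Assuming that is what the index encodes in general, a small sanity check could look like this; the key names again mirror the column names of this dump, and the substring comparison is only a heuristic.

def gold_snippet_matches_next_line(record: dict) -> bool:
    # Assumption: gold_snippet_index selects the context entry whose identifier
    # is the cross-file symbol referenced on the ground-truth next_line.
    gold = record["context"][record["gold_snippet_index"]]
    return gold["identifier"] in record["next_line"]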
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/attention.py
[ { "identifier": "USE_PEFT_BACKEND", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version" }, { "identifier": "maybe_allow_in_graph", "path": "diffusers/src/diffusers/utils/torch_utils.py", "snippet": "def maybe_allow_in_graph(cls):\n return cls" }, { "identifier": "GEGLU", "path": "diffusers/src/diffusers/models/activations.py", "snippet": "class GEGLU(nn.Module):\n r\"\"\"\n A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.\n\n Parameters:\n dim_in (`int`): The number of channels in the input.\n dim_out (`int`): The number of channels in the output.\n \"\"\"\n\n def __init__(self, dim_in: int, dim_out: int):\n super().__init__()\n linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear\n\n self.proj = linear_cls(dim_in, dim_out * 2)\n\n def gelu(self, gate: torch.Tensor) -> torch.Tensor:\n if gate.device.type != \"mps\":\n return F.gelu(gate)\n # mps: gelu is not implemented for float16\n return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)\n\n def forward(self, hidden_states, scale: float = 1.0):\n args = () if USE_PEFT_BACKEND else (scale,)\n hidden_states, gate = self.proj(hidden_states, *args).chunk(2, dim=-1)\n return hidden_states * self.gelu(gate)" }, { "identifier": "GELU", "path": "diffusers/src/diffusers/models/activations.py", "snippet": "class GELU(nn.Module):\n r\"\"\"\n GELU activation function with tanh approximation support with `approximate=\"tanh\"`.\n\n Parameters:\n dim_in (`int`): The number of channels in the input.\n dim_out (`int`): The number of channels in the output.\n approximate (`str`, *optional*, defaults to `\"none\"`): If `\"tanh\"`, use tanh approximation.\n \"\"\"\n\n def __init__(self, dim_in: int, dim_out: int, approximate: str = \"none\"):\n super().__init__()\n self.proj = nn.Linear(dim_in, dim_out)\n self.approximate = approximate\n\n def gelu(self, gate: torch.Tensor) -> torch.Tensor:\n if gate.device.type != \"mps\":\n return F.gelu(gate, approximate=self.approximate)\n # mps: gelu is not implemented for float16\n return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)\n\n def forward(self, hidden_states):\n hidden_states = self.proj(hidden_states)\n hidden_states = self.gelu(hidden_states)\n return hidden_states" }, { "identifier": "ApproximateGELU", "path": "diffusers/src/diffusers/models/activations.py", "snippet": "class ApproximateGELU(nn.Module):\n r\"\"\"\n The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this\n [paper](https://arxiv.org/abs/1606.08415).\n\n Parameters:\n dim_in (`int`): The number of channels in the input.\n dim_out (`int`): The number of channels in the output.\n \"\"\"\n\n def __init__(self, dim_in: int, dim_out: int):\n super().__init__()\n self.proj = nn.Linear(dim_in, dim_out)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.proj(x)\n return x * torch.sigmoid(1.702 * x)" }, { "identifier": "Attention", "path": "diffusers/src/diffusers/models/attention_processor.py", "snippet": "class Attention(nn.Module):\n r\"\"\"\n A cross attention layer.\n\n Parameters:\n query_dim (`int`):\n The number of channels in the query.\n cross_attention_dim (`int`, *optional*):\n The number of channels in the encoder_hidden_states. 
If not given, defaults to `query_dim`.\n heads (`int`, *optional*, defaults to 8):\n The number of heads to use for multi-head attention.\n dim_head (`int`, *optional*, defaults to 64):\n The number of channels in each head.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability to use.\n bias (`bool`, *optional*, defaults to False):\n Set to `True` for the query, key, and value linear layers to contain a bias parameter.\n upcast_attention (`bool`, *optional*, defaults to False):\n Set to `True` to upcast the attention computation to `float32`.\n upcast_softmax (`bool`, *optional*, defaults to False):\n Set to `True` to upcast the softmax computation to `float32`.\n cross_attention_norm (`str`, *optional*, defaults to `None`):\n The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`.\n cross_attention_norm_num_groups (`int`, *optional*, defaults to 32):\n The number of groups to use for the group norm in the cross attention.\n added_kv_proj_dim (`int`, *optional*, defaults to `None`):\n The number of channels to use for the added key and value projections. If `None`, no projection is used.\n norm_num_groups (`int`, *optional*, defaults to `None`):\n The number of groups to use for the group norm in the attention.\n spatial_norm_dim (`int`, *optional*, defaults to `None`):\n The number of channels to use for the spatial normalization.\n out_bias (`bool`, *optional*, defaults to `True`):\n Set to `True` to use a bias in the output linear layer.\n scale_qk (`bool`, *optional*, defaults to `True`):\n Set to `True` to scale the query and key by `1 / sqrt(dim_head)`.\n only_cross_attention (`bool`, *optional*, defaults to `False`):\n Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if\n `added_kv_proj_dim` is not `None`.\n eps (`float`, *optional*, defaults to 1e-5):\n An additional value added to the denominator in group normalization that is used for numerical stability.\n rescale_output_factor (`float`, *optional*, defaults to 1.0):\n A factor to rescale the output by dividing it with this value.\n residual_connection (`bool`, *optional*, defaults to `False`):\n Set to `True` to add the residual connection to the output.\n _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`):\n Set to `True` if the attention block is loaded from a deprecated state dict.\n processor (`AttnProcessor`, *optional*, defaults to `None`):\n The attention processor to use. 
If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and\n `AttnProcessor` otherwise.\n \"\"\"\n\n def __init__(\n self,\n query_dim: int,\n cross_attention_dim: Optional[int] = None,\n heads: int = 8,\n dim_head: int = 64,\n dropout: float = 0.0,\n bias: bool = False,\n upcast_attention: bool = False,\n upcast_softmax: bool = False,\n cross_attention_norm: Optional[str] = None,\n cross_attention_norm_num_groups: int = 32,\n added_kv_proj_dim: Optional[int] = None,\n norm_num_groups: Optional[int] = None,\n spatial_norm_dim: Optional[int] = None,\n out_bias: bool = True,\n scale_qk: bool = True,\n only_cross_attention: bool = False,\n eps: float = 1e-5,\n rescale_output_factor: float = 1.0,\n residual_connection: bool = False,\n _from_deprecated_attn_block: bool = False,\n processor: Optional[\"AttnProcessor\"] = None,\n ):\n super().__init__()\n self.inner_dim = dim_head * heads\n self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim\n self.upcast_attention = upcast_attention\n self.upcast_softmax = upcast_softmax\n self.rescale_output_factor = rescale_output_factor\n self.residual_connection = residual_connection\n self.dropout = dropout\n\n # we make use of this private variable to know whether this class is loaded\n # with an deprecated state dict so that we can convert it on the fly\n self._from_deprecated_attn_block = _from_deprecated_attn_block\n\n self.scale_qk = scale_qk\n self.scale = dim_head**-0.5 if self.scale_qk else 1.0\n\n self.heads = heads\n # for slice_size > 0 the attention score computation\n # is split across the batch axis to save memory\n # You can set slice_size with `set_attention_slice`\n self.sliceable_head_dim = heads\n\n self.added_kv_proj_dim = added_kv_proj_dim\n self.only_cross_attention = only_cross_attention\n\n if self.added_kv_proj_dim is None and self.only_cross_attention:\n raise ValueError(\n \"`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`.\"\n )\n\n if norm_num_groups is not None:\n self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True)\n else:\n self.group_norm = None\n\n if spatial_norm_dim is not None:\n self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim)\n else:\n self.spatial_norm = None\n\n if cross_attention_norm is None:\n self.norm_cross = None\n elif cross_attention_norm == \"layer_norm\":\n self.norm_cross = nn.LayerNorm(self.cross_attention_dim)\n elif cross_attention_norm == \"group_norm\":\n if self.added_kv_proj_dim is not None:\n # The given `encoder_hidden_states` are initially of shape\n # (batch_size, seq_len, added_kv_proj_dim) before being projected\n # to (batch_size, seq_len, cross_attention_dim). The norm is applied\n # before the projection, so we need to use `added_kv_proj_dim` as\n # the number of channels for the group norm.\n norm_cross_num_channels = added_kv_proj_dim\n else:\n norm_cross_num_channels = self.cross_attention_dim\n\n self.norm_cross = nn.GroupNorm(\n num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True\n )\n else:\n raise ValueError(\n f\"unknown cross_attention_norm: {cross_attention_norm}. 
Should be None, 'layer_norm' or 'group_norm'\"\n )\n\n if USE_PEFT_BACKEND:\n linear_cls = nn.Linear\n else:\n linear_cls = LoRACompatibleLinear\n\n self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias)\n\n if not self.only_cross_attention:\n # only relevant for the `AddedKVProcessor` classes\n self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)\n self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)\n else:\n self.to_k = None\n self.to_v = None\n\n if self.added_kv_proj_dim is not None:\n self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim)\n self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim)\n\n self.to_out = nn.ModuleList([])\n self.to_out.append(linear_cls(self.inner_dim, query_dim, bias=out_bias))\n self.to_out.append(nn.Dropout(dropout))\n\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1\n if processor is None:\n processor = (\n AttnProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk else AttnProcessor()\n )\n self.set_processor(processor)\n\n def set_use_memory_efficient_attention_xformers(\n self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None\n ) -> None:\n r\"\"\"\n Set whether to use memory efficient attention from `xformers` or not.\n\n Args:\n use_memory_efficient_attention_xformers (`bool`):\n Whether to use memory efficient attention from `xformers` or not.\n attention_op (`Callable`, *optional*):\n The attention operation to use. Defaults to `None` which uses the default attention operation from\n `xformers`.\n \"\"\"\n is_lora = hasattr(self, \"processor\") and isinstance(\n self.processor,\n LORA_ATTENTION_PROCESSORS,\n )\n is_custom_diffusion = hasattr(self, \"processor\") and isinstance(\n self.processor,\n (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0),\n )\n is_added_kv_processor = hasattr(self, \"processor\") and isinstance(\n self.processor,\n (\n AttnAddedKVProcessor,\n AttnAddedKVProcessor2_0,\n SlicedAttnAddedKVProcessor,\n XFormersAttnAddedKVProcessor,\n LoRAAttnAddedKVProcessor,\n ),\n )\n\n if use_memory_efficient_attention_xformers:\n if is_added_kv_processor and (is_lora or is_custom_diffusion):\n raise NotImplementedError(\n f\"Memory efficient attention is currently not supported for LoRA or custom diffusion for attention processor type {self.processor}\"\n )\n if not is_xformers_available():\n raise ModuleNotFoundError(\n (\n \"Refer to https://github.com/facebookresearch/xformers for more information on how to install\"\n \" xformers\"\n ),\n name=\"xformers\",\n )\n elif not torch.cuda.is_available():\n raise ValueError(\n \"torch.cuda.is_available() should be True but is False. 
xformers' memory efficient attention is\"\n \" only available for GPU \"\n )\n else:\n try:\n # Make sure we can run the memory efficient attention\n _ = xformers.ops.memory_efficient_attention(\n torch.randn((1, 2, 40), device=\"cuda\"),\n torch.randn((1, 2, 40), device=\"cuda\"),\n torch.randn((1, 2, 40), device=\"cuda\"),\n )\n except Exception as e:\n raise e\n\n if is_lora:\n # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers\n # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0?\n processor = LoRAXFormersAttnProcessor(\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n rank=self.processor.rank,\n attention_op=attention_op,\n )\n processor.load_state_dict(self.processor.state_dict())\n processor.to(self.processor.to_q_lora.up.weight.device)\n elif is_custom_diffusion:\n processor = CustomDiffusionXFormersAttnProcessor(\n train_kv=self.processor.train_kv,\n train_q_out=self.processor.train_q_out,\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n attention_op=attention_op,\n )\n processor.load_state_dict(self.processor.state_dict())\n if hasattr(self.processor, \"to_k_custom_diffusion\"):\n processor.to(self.processor.to_k_custom_diffusion.weight.device)\n elif is_added_kv_processor:\n # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP\n # which uses this type of cross attention ONLY because the attention mask of format\n # [0, ..., -10.000, ..., 0, ...,] is not supported\n # throw warning\n logger.info(\n \"Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation.\"\n )\n processor = XFormersAttnAddedKVProcessor(attention_op=attention_op)\n else:\n processor = XFormersAttnProcessor(attention_op=attention_op)\n else:\n if is_lora:\n attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n )\n processor = attn_processor_class(\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n rank=self.processor.rank,\n )\n processor.load_state_dict(self.processor.state_dict())\n processor.to(self.processor.to_q_lora.up.weight.device)\n elif is_custom_diffusion:\n attn_processor_class = (\n CustomDiffusionAttnProcessor2_0\n if hasattr(F, \"scaled_dot_product_attention\")\n else CustomDiffusionAttnProcessor\n )\n processor = attn_processor_class(\n train_kv=self.processor.train_kv,\n train_q_out=self.processor.train_q_out,\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n )\n processor.load_state_dict(self.processor.state_dict())\n if hasattr(self.processor, \"to_k_custom_diffusion\"):\n processor.to(self.processor.to_k_custom_diffusion.weight.device)\n else:\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. 
TODO remove scale_qk check when we move to torch 2.1\n processor = (\n AttnProcessor2_0()\n if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk\n else AttnProcessor()\n )\n\n self.set_processor(processor)\n\n def set_attention_slice(self, slice_size: int) -> None:\n r\"\"\"\n Set the slice size for attention computation.\n\n Args:\n slice_size (`int`):\n The slice size for attention computation.\n \"\"\"\n if slice_size is not None and slice_size > self.sliceable_head_dim:\n raise ValueError(f\"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.\")\n\n if slice_size is not None and self.added_kv_proj_dim is not None:\n processor = SlicedAttnAddedKVProcessor(slice_size)\n elif slice_size is not None:\n processor = SlicedAttnProcessor(slice_size)\n elif self.added_kv_proj_dim is not None:\n processor = AttnAddedKVProcessor()\n else:\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1\n processor = (\n AttnProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk else AttnProcessor()\n )\n\n self.set_processor(processor)\n\n def set_processor(self, processor: \"AttnProcessor\", _remove_lora: bool = False) -> None:\n r\"\"\"\n Set the attention processor to use.\n\n Args:\n processor (`AttnProcessor`):\n The attention processor to use.\n _remove_lora (`bool`, *optional*, defaults to `False`):\n Set to `True` to remove LoRA layers from the model.\n \"\"\"\n if not USE_PEFT_BACKEND and hasattr(self, \"processor\") and _remove_lora and self.to_q.lora_layer is not None:\n deprecate(\n \"set_processor to offload LoRA\",\n \"0.26.0\",\n \"In detail, removing LoRA layers via calling `set_default_attn_processor` is deprecated. Please make sure to call `pipe.unload_lora_weights()` instead.\",\n )\n # TODO(Patrick, Sayak) - this can be deprecated once PEFT LoRA integration is complete\n # We need to remove all LoRA layers\n # Don't forget to remove ALL `_remove_lora` from the codebase\n for module in self.modules():\n if hasattr(module, \"set_lora_layer\"):\n module.set_lora_layer(None)\n\n # if current processor is in `self._modules` and if passed `processor` is not, we need to\n # pop `processor` from `self._modules`\n if (\n hasattr(self, \"processor\")\n and isinstance(self.processor, torch.nn.Module)\n and not isinstance(processor, torch.nn.Module)\n ):\n logger.info(f\"You are removing possibly trained weights of {self.processor} with {processor}\")\n self._modules.pop(\"processor\")\n\n self.processor = processor\n\n def get_processor(self, return_deprecated_lora: bool = False) -> \"AttentionProcessor\":\n r\"\"\"\n Get the attention processor in use.\n\n Args:\n return_deprecated_lora (`bool`, *optional*, defaults to `False`):\n Set to `True` to return the deprecated LoRA attention processor.\n\n Returns:\n \"AttentionProcessor\": The attention processor in use.\n \"\"\"\n if not return_deprecated_lora:\n return self.processor\n\n # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible\n # serialization format for LoRA Attention Processors. 
It should be deleted once the integration\n # with PEFT is completed.\n is_lora_activated = {\n name: module.lora_layer is not None\n for name, module in self.named_modules()\n if hasattr(module, \"lora_layer\")\n }\n\n # 1. if no layer has a LoRA activated we can return the processor as usual\n if not any(is_lora_activated.values()):\n return self.processor\n\n # If doesn't apply LoRA do `add_k_proj` or `add_v_proj`\n is_lora_activated.pop(\"add_k_proj\", None)\n is_lora_activated.pop(\"add_v_proj\", None)\n # 2. else it is not posssible that only some layers have LoRA activated\n if not all(is_lora_activated.values()):\n raise ValueError(\n f\"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}\"\n )\n\n # 3. And we need to merge the current LoRA layers into the corresponding LoRA attention processor\n non_lora_processor_cls_name = self.processor.__class__.__name__\n lora_processor_cls = getattr(import_module(__name__), \"LoRA\" + non_lora_processor_cls_name)\n\n hidden_size = self.inner_dim\n\n # now create a LoRA attention processor from the LoRA layers\n if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]:\n kwargs = {\n \"cross_attention_dim\": self.cross_attention_dim,\n \"rank\": self.to_q.lora_layer.rank,\n \"network_alpha\": self.to_q.lora_layer.network_alpha,\n \"q_rank\": self.to_q.lora_layer.rank,\n \"q_hidden_size\": self.to_q.lora_layer.out_features,\n \"k_rank\": self.to_k.lora_layer.rank,\n \"k_hidden_size\": self.to_k.lora_layer.out_features,\n \"v_rank\": self.to_v.lora_layer.rank,\n \"v_hidden_size\": self.to_v.lora_layer.out_features,\n \"out_rank\": self.to_out[0].lora_layer.rank,\n \"out_hidden_size\": self.to_out[0].lora_layer.out_features,\n }\n\n if hasattr(self.processor, \"attention_op\"):\n kwargs[\"attention_op\"] = self.processor.attention_op\n\n lora_processor = lora_processor_cls(hidden_size, **kwargs)\n lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())\n lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())\n lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())\n lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())\n elif lora_processor_cls == LoRAAttnAddedKVProcessor:\n lora_processor = lora_processor_cls(\n hidden_size,\n cross_attention_dim=self.add_k_proj.weight.shape[0],\n rank=self.to_q.lora_layer.rank,\n network_alpha=self.to_q.lora_layer.network_alpha,\n )\n lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())\n lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())\n lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())\n lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())\n\n # only save if used\n if self.add_k_proj.lora_layer is not None:\n lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict())\n lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict())\n else:\n lora_processor.add_k_proj_lora = None\n lora_processor.add_v_proj_lora = None\n else:\n raise ValueError(f\"{lora_processor_cls} does not exist.\")\n\n return lora_processor\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n **cross_attention_kwargs,\n ) -> torch.Tensor:\n r\"\"\"\n The forward method of the `Attention` 
class.\n\n Args:\n hidden_states (`torch.Tensor`):\n The hidden states of the query.\n encoder_hidden_states (`torch.Tensor`, *optional*):\n The hidden states of the encoder.\n attention_mask (`torch.Tensor`, *optional*):\n The attention mask to use. If `None`, no mask is applied.\n **cross_attention_kwargs:\n Additional keyword arguments to pass along to the cross attention.\n\n Returns:\n `torch.Tensor`: The output of the attention layer.\n \"\"\"\n # The `Attention` class can call different attention processors / attention functions\n # here we simply pass along all tensors to the selected processor class\n # For standard processors that are defined here, `**cross_attention_kwargs` is empty\n return self.processor(\n self,\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n\n def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`\n is the number of heads initialized while constructing the `Attention` class.\n\n Args:\n tensor (`torch.Tensor`): The tensor to reshape.\n\n Returns:\n `torch.Tensor`: The reshaped tensor.\n \"\"\"\n head_size = self.heads\n batch_size, seq_len, dim = tensor.shape\n tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)\n tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)\n return tensor\n\n def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:\n r\"\"\"\n Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is\n the number of heads initialized while constructing the `Attention` class.\n\n Args:\n tensor (`torch.Tensor`): The tensor to reshape.\n out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is\n reshaped to `[batch_size * heads, seq_len, dim // heads]`.\n\n Returns:\n `torch.Tensor`: The reshaped tensor.\n \"\"\"\n head_size = self.heads\n batch_size, seq_len, dim = tensor.shape\n tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)\n tensor = tensor.permute(0, 2, 1, 3)\n\n if out_dim == 3:\n tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)\n\n return tensor\n\n def get_attention_scores(\n self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None\n ) -> torch.Tensor:\n r\"\"\"\n Compute the attention scores.\n\n Args:\n query (`torch.Tensor`): The query tensor.\n key (`torch.Tensor`): The key tensor.\n attention_mask (`torch.Tensor`, *optional*): The attention mask to use. 
If `None`, no mask is applied.\n\n Returns:\n `torch.Tensor`: The attention probabilities/scores.\n \"\"\"\n dtype = query.dtype\n if self.upcast_attention:\n query = query.float()\n key = key.float()\n\n if attention_mask is None:\n baddbmm_input = torch.empty(\n query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device\n )\n beta = 0\n else:\n baddbmm_input = attention_mask\n beta = 1\n\n attention_scores = torch.baddbmm(\n baddbmm_input,\n query,\n key.transpose(-1, -2),\n beta=beta,\n alpha=self.scale,\n )\n del baddbmm_input\n\n if self.upcast_softmax:\n attention_scores = attention_scores.float()\n\n attention_probs = attention_scores.softmax(dim=-1)\n del attention_scores\n\n attention_probs = attention_probs.to(dtype)\n\n return attention_probs\n\n def prepare_attention_mask(\n self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3\n ) -> torch.Tensor:\n r\"\"\"\n Prepare the attention mask for the attention computation.\n\n Args:\n attention_mask (`torch.Tensor`):\n The attention mask to prepare.\n target_length (`int`):\n The target length of the attention mask. This is the length of the attention mask after padding.\n batch_size (`int`):\n The batch size, which is used to repeat the attention mask.\n out_dim (`int`, *optional*, defaults to `3`):\n The output dimension of the attention mask. Can be either `3` or `4`.\n\n Returns:\n `torch.Tensor`: The prepared attention mask.\n \"\"\"\n head_size = self.heads\n if attention_mask is None:\n return attention_mask\n\n current_length: int = attention_mask.shape[-1]\n if current_length != target_length:\n if attention_mask.device.type == \"mps\":\n # HACK: MPS: Does not support padding by greater than dimension of input tensor.\n # Instead, we can manually construct the padding tensor.\n padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)\n padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, padding], dim=2)\n else:\n # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:\n # we want to instead pad by (0, remaining_length), where remaining_length is:\n # remaining_length: int = target_length - current_length\n # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding\n attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)\n\n if out_dim == 3:\n if attention_mask.shape[0] < batch_size * head_size:\n attention_mask = attention_mask.repeat_interleave(head_size, dim=0)\n elif out_dim == 4:\n attention_mask = attention_mask.unsqueeze(1)\n attention_mask = attention_mask.repeat_interleave(head_size, dim=1)\n\n return attention_mask\n\n def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the\n `Attention` class.\n\n Args:\n encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.\n\n Returns:\n `torch.Tensor`: The normalized encoder hidden states.\n \"\"\"\n assert self.norm_cross is not None, \"self.norm_cross must be defined to call self.norm_encoder_hidden_states\"\n\n if isinstance(self.norm_cross, nn.LayerNorm):\n encoder_hidden_states = self.norm_cross(encoder_hidden_states)\n elif isinstance(self.norm_cross, nn.GroupNorm):\n # Group norm norms along the channels dimension and expects\n # input to be in the shape of (N, C, *). 
In this case, we want\n # to norm along the hidden dimension, so we need to move\n # (batch_size, sequence_length, hidden_size) ->\n # (batch_size, hidden_size, sequence_length)\n encoder_hidden_states = encoder_hidden_states.transpose(1, 2)\n encoder_hidden_states = self.norm_cross(encoder_hidden_states)\n encoder_hidden_states = encoder_hidden_states.transpose(1, 2)\n else:\n assert False\n\n return encoder_hidden_states" }, { "identifier": "SinusoidalPositionalEmbedding", "path": "diffusers/src/diffusers/models/embeddings.py", "snippet": "class SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"Apply positional information to a sequence of embeddings.\n\n Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to\n them\n\n Args:\n embed_dim: (int): Dimension of the positional embedding.\n max_seq_length: Maximum sequence length to apply positional embeddings\n\n \"\"\"\n\n def __init__(self, embed_dim: int, max_seq_length: int = 32):\n super().__init__()\n position = torch.arange(max_seq_length).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))\n pe = torch.zeros(1, max_seq_length, embed_dim)\n pe[0, :, 0::2] = torch.sin(position * div_term)\n pe[0, :, 1::2] = torch.cos(position * div_term)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n _, seq_length, _ = x.shape\n x = x + self.pe[:, :seq_length]\n return x" }, { "identifier": "LoRACompatibleLinear", "path": "diffusers/src/diffusers/models/lora.py", "snippet": "class LoRACompatibleLinear(nn.Linear):\n \"\"\"\n A Linear layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):\n self.lora_layer = lora_layer\n\n def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):\n if self.lora_layer is None:\n return\n\n dtype, device = self.weight.data.dtype, self.weight.data.device\n\n w_orig = self.weight.data.float()\n w_up = self.lora_layer.up.weight.data.float()\n w_down = self.lora_layer.down.weight.data.float()\n\n if self.lora_layer.network_alpha is not None:\n w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank\n\n fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n\n if safe_fusing and torch.isnan(fused_weight).any().item():\n raise ValueError(\n \"This LoRA weight seems to be broken. 
\"\n f\"Encountered NaN values when trying to fuse LoRA weights for {self}.\"\n \"LoRA weights will not be fused.\"\n )\n\n self.weight.data = fused_weight.to(device=device, dtype=dtype)\n\n # we can drop the lora layer now\n self.lora_layer = None\n\n # offload the up and down matrices to CPU to not blow the memory\n self.w_up = w_up.cpu()\n self.w_down = w_down.cpu()\n self._lora_scale = lora_scale\n\n def _unfuse_lora(self):\n if not (getattr(self, \"w_up\", None) is not None and getattr(self, \"w_down\", None) is not None):\n return\n\n fused_weight = self.weight.data\n dtype, device = fused_weight.dtype, fused_weight.device\n\n w_up = self.w_up.to(device=device).float()\n w_down = self.w_down.to(device).float()\n\n unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n self.weight.data = unfused_weight.to(device=device, dtype=dtype)\n\n self.w_up = None\n self.w_down = None\n\n def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:\n if self.lora_layer is None:\n out = super().forward(hidden_states)\n return out\n else:\n out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))\n return out" }, { "identifier": "AdaLayerNorm", "path": "diffusers/src/diffusers/models/normalization.py", "snippet": "class AdaLayerNorm(nn.Module):\n r\"\"\"\n Norm layer modified to incorporate timestep embeddings.\n\n Parameters:\n embedding_dim (`int`): The size of each embedding vector.\n num_embeddings (`int`): The size of the embeddings dictionary.\n \"\"\"\n\n def __init__(self, embedding_dim: int, num_embeddings: int):\n super().__init__()\n self.emb = nn.Embedding(num_embeddings, embedding_dim)\n self.silu = nn.SiLU()\n self.linear = nn.Linear(embedding_dim, embedding_dim * 2)\n self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)\n\n def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor:\n emb = self.linear(self.silu(self.emb(timestep)))\n scale, shift = torch.chunk(emb, 2)\n x = self.norm(x) * (1 + scale) + shift\n return x" }, { "identifier": "AdaLayerNormZero", "path": "diffusers/src/diffusers/models/normalization.py", "snippet": "class AdaLayerNormZero(nn.Module):\n r\"\"\"\n Norm layer adaptive layer norm zero (adaLN-Zero).\n\n Parameters:\n embedding_dim (`int`): The size of each embedding vector.\n num_embeddings (`int`): The size of the embeddings dictionary.\n \"\"\"\n\n def __init__(self, embedding_dim: int, num_embeddings: int):\n super().__init__()\n\n self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)\n\n self.silu = nn.SiLU()\n self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)\n self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)\n\n def forward(\n self,\n x: torch.Tensor,\n timestep: torch.Tensor,\n class_labels: torch.LongTensor,\n hidden_dtype: Optional[torch.dtype] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))\n shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)\n x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]\n return x, gate_msa, shift_mlp, scale_mlp, gate_mlp" } ]
from typing import Any, Dict, Optional
from torch import nn
from ..utils import USE_PEFT_BACKEND
from ..utils.torch_utils import maybe_allow_in_graph
from .activations import GEGLU, GELU, ApproximateGELU
from .attention_processor import Attention
from .embeddings import SinusoidalPositionalEmbedding
from .lora import LoRACompatibleLinear
from .normalization import AdaLayerNorm, AdaLayerNormZero
import torch
12,521
self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.FloatTensor, num_frames: int, encoder_hidden_states: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ): super().__init__() inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def _chunked_feed_forward( ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None ): # "feed_forward_chunk_size" can be used to save memory if hidden_states.shape[chunk_dim] % chunk_size != 0: raise ValueError( f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) num_chunks = hidden_states.shape[chunk_dim] // chunk_size if lora_scale is None: ff_output = torch.cat( [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) else: # TOOD(Patrick): LoRA scale can be removed once PEFT refactor is complete ff_output = torch.cat( [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) return ff_output @maybe_allow_in_graph class GatedSelfAttentionDense(nn.Module): r""" A gated self-attention dense layer that combines visual features and object features. Parameters: query_dim (`int`): The number of channels in the query. context_dim (`int`): The number of channels in the context. n_heads (`int`): The number of heads to use for attention. d_head (`int`): The number of channels in each head. """ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): super().__init__() # we need a linear projection since we need cat visual feature and obj feature self.linear = nn.Linear(context_dim, query_dim) self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) self.ff = FeedForward(query_dim, activation_fn="geglu") self.norm1 = nn.LayerNorm(query_dim) self.norm2 = nn.LayerNorm(query_dim) self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0))) self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0))) self.enabled = True def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: if not self.enabled: return x n_visual = x.shape[1] objs = self.linear(objs) x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) return x @maybe_allow_in_graph class BasicTransformerBlock(nn.Module): r""" A basic Transformer block. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (: obj: `int`, *optional*): The number of diffusion steps used during training. 
See `Transformer2DModel`. attention_bias (: obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the attention computation to float32. This is useful for mixed precision training. norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. final_dropout (`bool` *optional*, defaults to False): Whether to apply a final dropout after the last feed-forward layer. attention_type (`str`, *optional*, defaults to `"default"`): The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. positional_embeddings (`str`, *optional*, defaults to `None`): The type of positional embeddings to apply to. num_positional_embeddings (`int`, *optional*, defaults to `None`): The maximum number of positional embeddings to apply. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single' norm_eps: float = 1e-5, final_dropout: bool = False, attention_type: str = "default", positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.use_ada_layer_norm_single = norm_type == "ada_norm_single" self.use_layer_norm = norm_type == "layer_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) if positional_embeddings and (num_positional_embeddings is None): raise ValueError( "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." ) if positional_embeddings == "sinusoidal": self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None # Define 3 blocks. Each block has its own normalization layer. # 1. 
Self-Attn if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) elif self.use_ada_layer_norm_zero: self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) ) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward if not self.use_ada_layer_norm_single: self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, ) # 4. Fuser if attention_type == "gated" or attention_type == "gated-text-image": self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) # 5. Scale-shift for PixArt-Alpha. if self.use_ada_layer_norm_single: self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) elif self.use_ada_layer_norm_zero: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype ) elif self.use_layer_norm: norm_hidden_states = self.norm1(hidden_states) elif self.use_ada_layer_norm_single: shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1) ).chunk(6, dim=1) norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa norm_hidden_states = norm_hidden_states.squeeze(1) else: raise ValueError("Incorrect norm used") if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) # 1. 
Retrieve lora scale. lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 # 2. Prepare GLIGEN inputs cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} gligen_kwargs = cross_attention_kwargs.pop("gligen", None) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) if self.use_ada_layer_norm_zero: attn_output = gate_msa.unsqueeze(1) * attn_output elif self.use_ada_layer_norm_single: attn_output = gate_msa * attn_output hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) # 2.5 GLIGEN Control if gligen_kwargs is not None: hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"]) # 3. Cross-Attention if self.attn2 is not None: if self.use_ada_layer_norm: norm_hidden_states = self.norm2(hidden_states, timestep) elif self.use_ada_layer_norm_zero or self.use_layer_norm: norm_hidden_states = self.norm2(hidden_states) elif self.use_ada_layer_norm_single: # For PixArt norm2 isn't applied here: # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103 norm_hidden_states = hidden_states else: raise ValueError("Incorrect norm") if self.pos_embed is not None and self.use_ada_layer_norm_single is False: norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # 4. Feed-forward if not self.use_ada_layer_norm_single: norm_hidden_states = self.norm3(hidden_states) if self.use_ada_layer_norm_zero: norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self.use_ada_layer_norm_single: norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory ff_output = _chunked_feed_forward( self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale ) else: ff_output = self.ff(norm_hidden_states, scale=lora_scale) if self.use_ada_layer_norm_zero: ff_output = gate_mlp.unsqueeze(1) * ff_output elif self.use_ada_layer_norm_single: ff_output = gate_mlp * ff_output hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states @maybe_allow_in_graph class TemporalBasicTransformerBlock(nn.Module): r""" A basic Transformer block for video like data. Parameters: dim (`int`): The number of channels in the input and output. time_mix_inner_dim (`int`): The number of channels for temporal attention. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. """ def __init__( self, dim: int, time_mix_inner_dim: int, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: Optional[int] = None, ): super().__init__() self.is_res = dim == time_mix_inner_dim self.norm_in = nn.LayerNorm(dim) # Define 3 blocks. Each block has its own normalization layer. # 1. 
Self-Attn self.norm_in = nn.LayerNorm(dim) self.ff_in = FeedForward( dim, dim_out=time_mix_inner_dim, activation_fn="geglu", ) self.norm1 = nn.LayerNorm(time_mix_inner_dim) self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.FloatTensor, num_frames: int, encoder_hidden_states: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. 
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ): super().__init__() inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim
linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
0
2023-12-28 08:17:40+00:00
16k
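As an aside, the record above revolves around diffusers' transformer blocks, where _chunked_feed_forward runs the feed-forward network over slices of the token sequence and concatenates the results, so that only one slice's intermediate activations are held in memory at a time. Below is a minimal, self-contained PyTorch sketch of that chunking idea; the small nn.Sequential module and the tensor shapes are illustrative assumptions, not the library's actual FeedForward.

import torch
from torch import nn

def chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor,
                         chunk_dim: int, chunk_size: int) -> torch.Tensor:
    # Apply `ff` chunk by chunk along `chunk_dim`, then stitch the outputs
    # back together; the sequence length must divide evenly into chunks.
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    return torch.cat(
        [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )

# Toy usage: a two-layer feed-forward applied over chunks of 8 tokens.
ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
x = torch.randn(2, 32, 64)  # (batch, seq_len, dim)
out = chunked_feed_forward(ff, x, chunk_dim=1, chunk_size=8)
assert out.shape == x.shape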
FoundationVision/UniRef
detectron2/data/datasets/coco.py
[ { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "PolygonMasks", "path": "detectron2/structures/masks.py", "snippet": "class PolygonMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in the form of polygons.\n\n Attributes:\n polygons: list[list[ndarray]]. 
Each ndarray is a float64 vector representing a polygon.\n \"\"\"\n\n def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):\n \"\"\"\n Arguments:\n polygons (list[list[np.ndarray]]): The first\n level of the list correspond to individual instances,\n the second level to all the polygons that compose the\n instance, and the third level to the polygon coordinates.\n The third level array should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n \"\"\"\n if not isinstance(polygons, list):\n raise ValueError(\n \"Cannot create PolygonMasks: Expect a list of list of polygons per image. \"\n \"Got '{}' instead.\".format(type(polygons))\n )\n\n def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n # Use float64 for higher precision, because why not?\n # Always put polygons on CPU (self.to is a no-op) since they\n # are supposed to be small tensors.\n # May need to change this assumption if GPU placement becomes useful\n if isinstance(t, torch.Tensor):\n t = t.cpu().numpy()\n return np.asarray(t).astype(\"float64\")\n\n def process_polygons(\n polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[np.ndarray]:\n if not isinstance(polygons_per_instance, list):\n raise ValueError(\n \"Cannot create polygons: Expect a list of polygons per instance. \"\n \"Got '{}' instead.\".format(type(polygons_per_instance))\n )\n # transform each polygon to a numpy array\n polygons_per_instance = [_make_array(p) for p in polygons_per_instance]\n for polygon in polygons_per_instance:\n if len(polygon) % 2 != 0 or len(polygon) < 6:\n raise ValueError(f\"Cannot create a polygon from {len(polygon)} coordinates.\")\n return polygons_per_instance\n\n self.polygons: List[List[np.ndarray]] = [\n process_polygons(polygons_per_instance) for polygons_per_instance in polygons\n ]\n\n def to(self, *args: Any, **kwargs: Any) -> \"PolygonMasks\":\n return self\n\n @property\n def device(self) -> torch.device:\n return torch.device(\"cpu\")\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around polygon masks.\n \"\"\"\n boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)\n for idx, polygons_per_instance in enumerate(self.polygons):\n minxy = torch.as_tensor([float(\"inf\"), float(\"inf\")], dtype=torch.float32)\n maxxy = torch.zeros(2, dtype=torch.float32)\n for polygon in polygons_per_instance:\n coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)\n minxy = torch.min(minxy, torch.min(coords, dim=0).values)\n maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)\n boxes[idx, :2] = minxy\n boxes[idx, 2:] = maxxy\n return Boxes(boxes)\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor:\n a BoolTensor which represents whether each mask is empty (False) or not (True).\n \"\"\"\n keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]\n return torch.from_numpy(np.asarray(keep, dtype=np.bool))\n\n def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n \"\"\"\n Support indexing over the instances and return a `PolygonMasks` object.\n `item` can be:\n\n 1. An integer. It will return an object with only one instance.\n 2. A slice. It will return an object with the selected instances.\n 3. A list[int]. It will return an object with the selected instances,\n correpsonding to the indices in the list.\n 4. 
A vector mask of type BoolTensor, whose length is num_instances.\n It will return an object with the instances whose mask is nonzero.\n \"\"\"\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)\n\n def __iter__(self) -> Iterator[List[np.ndarray]]:\n \"\"\"\n Yields:\n list[ndarray]: the polygons for one instance.\n Each Tensor is a float64 vector representing a polygon.\n \"\"\"\n return iter(self.polygons)\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.polygons))\n return s\n\n def __len__(self) -> int:\n return len(self.polygons)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each mask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor: A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n\n device = boxes.device\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\n # (several small tensors for representing a single instance mask)\n boxes = boxes.to(torch.device(\"cpu\"))\n\n results = [\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\n for poly, box in zip(self.polygons, boxes)\n ]\n \"\"\"\n poly: list[list[float]], the polygons for one instance\n box: a tensor of shape (4,)\n \"\"\"\n if len(results) == 0:\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\n return torch.stack(results, dim=0).to(device=device)\n\n def area(self):\n \"\"\"\n Computes area of the mask.\n Only works with Polygons, using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Returns:\n Tensor: a vector, area for each instance\n \"\"\"\n\n area = []\n for polygons_per_instance in self.polygons:\n area_per_instance = 0\n for p in polygons_per_instance:\n area_per_instance += polygon_area(p[0::2], p[1::2])\n area.append(area_per_instance)\n\n return torch.tensor(area)\n\n @staticmethod\n def cat(polymasks_list: List[\"PolygonMasks\"]) -> \"PolygonMasks\":\n \"\"\"\n Concatenates a list of PolygonMasks into a single PolygonMasks\n\n Arguments:\n polymasks_list (list[PolygonMasks])\n\n Returns:\n PolygonMasks: the concatenated PolygonMasks\n \"\"\"\n assert isinstance(polymasks_list, (list, tuple))\n assert len(polymasks_list) > 0\n assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)\n\n cat_polymasks = type(polymasks_list[0])(\n list(itertools.chain.from_iterable(pm.polygons for pm in 
polymasks_list))\n )\n return cat_polymasks" }, { "identifier": "RotatedBoxes", "path": "detectron2/structures/rotated_boxes.py", "snippet": "class RotatedBoxes(Boxes):\n \"\"\"\n This structure stores a list of rotated boxes as a Nx5 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx5 matrix. Each row is\n (x_center, y_center, width, height, angle),\n in which angle is represented in degrees.\n While there's no strict range restriction for it,\n the recommended principal range is between [-180, 180) degrees.\n\n Assume we have a horizontal box B = (x_center, y_center, width, height),\n where width is along the x-axis and height is along the y-axis.\n The rotated box B_rot (x_center, y_center, width, height, angle)\n can be seen as:\n\n 1. When angle == 0:\n B_rot == B\n 2. When angle > 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;\n 3. When angle < 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.\n\n Mathematically, since the right-handed coordinate system for image space\n is (y, x), where y is top->down and x is left->right, the 4 vertices of the\n rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from\n the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)\n in the following way (:math:`\\\\theta = angle*\\\\pi/180` is the angle in radians,\n :math:`(y_c, x_c)` is the center of the rectangle):\n\n .. math::\n\n yr_i = \\\\cos(\\\\theta) (y_i - y_c) - \\\\sin(\\\\theta) (x_i - x_c) + y_c,\n\n xr_i = \\\\sin(\\\\theta) (y_i - y_c) + \\\\cos(\\\\theta) (x_i - x_c) + x_c,\n\n which is the standard rigid-body rotation transformation.\n\n Intuitively, the angle is\n (1) the rotation angle from y-axis in image space\n to the height vector (top->down in the box's local coordinate system)\n of the box in CCW, and\n (2) the rotation angle from x-axis in image space\n to the width vector (left->right in the box's local coordinate system)\n of the box in CCW.\n\n More intuitively, consider the following horizontal box ABCD represented\n in (x1, y1, x2, y2): (3, 2, 7, 4),\n covering the [3, 7] x [2, 4] region of the continuous coordinate system\n which looks like this:\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | |\n | D---C\n |\n v y\n\n Note that each capital letter represents one 0-dimensional geometric point\n instead of a 'square pixel' here.\n\n In the example above, using (x, y) to represent a point we have:\n\n .. math::\n\n O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)\n\n We name vector AB = vector DC as the width vector in box's local coordinate system, and\n vector AD = vector BC as the height vector in box's local coordinate system. Initially,\n when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis\n in the image space, respectively.\n\n For better illustration, we denote the center of the box as E,\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | E |\n | D---C\n |\n v y\n\n where the center E = ((3+7)/2, (2+4)/2) = (5, 3).\n\n Also,\n\n .. 
math::\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Therefore, the corresponding representation for the same shape in rotated box in\n (x_center, y_center, width, height, angle) format is:\n\n (5, 3, 4, 2, 0),\n\n Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees\n CCW (counter-clockwise) by definition. It looks like this:\n\n .. code:: none\n\n O--------> x\n | B-C\n | | |\n | |E|\n | | |\n | A-D\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CCW with regard to E:\n A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)\n\n Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to\n vector AD or vector BC (the top->down height vector in box's local coordinate system),\n or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right\n width vector in box's local coordinate system).\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)\n by definition? It looks like this:\n\n .. code:: none\n\n O--------> x\n | D-A\n | | |\n | |E|\n | | |\n | C-B\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CW with regard to E:\n A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU\n will be 1. However, these two will generate different RoI Pooling results and\n should not be treated as an identical box.\n\n On the other hand, it's easy to see that (X, Y, W, H, A) is identical to\n (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be\n identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is\n equivalent to rotating the same shape 90 degrees CW.\n\n We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):\n\n .. code:: none\n\n O--------> x\n |\n | C---D\n | | E |\n | B---A\n |\n v y\n\n .. math::\n\n A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Finally, this is a very inaccurate (heavily quantized) illustration of\n how (5, 3, 4, 2, 60) looks like in case anyone wonders:\n\n .. 
code:: none\n\n O--------> x\n | B\\\n | / C\n | /E /\n | A /\n | `D\n v y\n\n It's still a rectangle with center of (5, 3), width of 4 and height of 2,\n but its angle (and thus orientation) is somewhere between\n (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"RotatedBoxes\":\n \"\"\"\n Clone the RotatedBoxes.\n\n Returns:\n RotatedBoxes\n \"\"\"\n return RotatedBoxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return RotatedBoxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = box[:, 2] * box[:, 3]\n return area\n\n def normalize_angles(self) -> None:\n \"\"\"\n Restrict angles to the range of [-180, 180) degrees\n \"\"\"\n self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0\n\n def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n For RRPN:\n Only clip boxes that are almost horizontal with a tolerance of\n clip_angle_threshold to maintain backward compatibility.\n\n Rotated boxes beyond this threshold are not clipped for two reasons:\n\n 1. There are potentially multiple ways to clip a rotated box to make it\n fit within the image.\n 2. It's tricky to make the entire rectangular box fit within the image\n and still be able to not leave out pixels of interest.\n\n Therefore we rely on ops like RoIAlignRotated to safely handle this.\n\n Args:\n box_size (height, width): The clipping box's size.\n clip_angle_threshold:\n Iff. 
abs(normalized(angle)) <= clip_angle_threshold (in degrees),\n we do the clipping as horizontal boxes.\n \"\"\"\n h, w = box_size\n\n # normalize angles to be within (-180, 180] degrees\n self.normalize_angles()\n\n idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]\n\n # convert to (x1, y1, x2, y2)\n x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0\n y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0\n x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0\n y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0\n\n # clip\n x1.clamp_(min=0, max=w)\n y1.clamp_(min=0, max=h)\n x2.clamp_(min=0, max=w)\n y2.clamp_(min=0, max=h)\n\n # convert back to (xc, yc, w, h)\n self.tensor[idx, 0] = (x1 + x2) / 2.0\n self.tensor[idx, 1] = (y1 + y2) / 2.0\n # make sure widths and heights do not increase due to numerical errors\n self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)\n self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor: a binary vector which represents\n whether each box is empty (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2]\n heights = box[:, 3]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"RotatedBoxes\":\n \"\"\"\n Returns:\n RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned RotatedBoxes might share storage with this RotatedBoxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return RotatedBoxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on RotatedBoxes with {} failed to return a matrix!\".format(\n item\n )\n return RotatedBoxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"RotatedBoxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box covering\n [0, width] x [0, height]\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n For RRPN, it might not be necessary to call this function since it's common\n for rotated box to extend to outside of the image boundaries\n (the clip function only clips the near-horizontal boxes)\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n\n cnt_x = self.tensor[..., 0]\n cnt_y = self.tensor[..., 1]\n half_w = self.tensor[..., 2] / 2.0\n half_h = self.tensor[..., 3] / 2.0\n a = self.tensor[..., 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n max_rect_dx = c * half_w + s * half_h\n max_rect_dy = c * half_h + s * half_w\n\n inds_inside = (\n (cnt_x - max_rect_dx >= -boundary_threshold)\n & (cnt_y - max_rect_dy >= -boundary_threshold)\n & (cnt_x + max_rect_dx < width + boundary_threshold)\n & (cnt_y + max_rect_dy < height + boundary_threshold)\n )\n\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return self.tensor[:, :2]\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the rotated box with horizontal and vertical scaling factors\n Note: when scale_factor_x != scale_factor_y,\n the rotated box does not preserve the rectangular shape when the angle\n is not a multiple of 90 degrees under resize transformation.\n Instead, the shape is a parallelogram (that has skew)\n Here we make an approximation by fitting a rotated rectangle to the parallelogram.\n \"\"\"\n self.tensor[:, 0] *= scale_x\n self.tensor[:, 1] *= scale_y\n theta = self.tensor[:, 4] * math.pi / 180.0\n c = torch.cos(theta)\n s = torch.sin(theta)\n\n # In image space, y is top->down and x is left->right\n # Consider the local coordintate system for the rotated box,\n # where the box center is located at (0, 0), and the four vertices ABCD are\n # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)\n # the midpoint of the left edge AD of the rotated box E is:\n # E = (A+D)/2 = (-w / 2, 0)\n # the midpoint of the top edge AB of the rotated box F is:\n # F(0, -h / 2)\n # To get the old coordinates in the global system, apply the rotation transformation\n # (Note: the right-handed coordinate system for image space is yOx):\n # (old_x, old_y) = (s * y + c * x, c * y - s * x)\n # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)\n # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)\n # After applying the scaling factor (sfx, sfy):\n 
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)\n # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)\n # The new width after scaling tranformation becomes:\n\n # w(new) = |E(new) - O| * 2\n # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2\n # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w\n # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y\n self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)\n\n # h(new) = |F(new) - O| * 2\n # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2\n # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h\n # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x\n self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)\n\n # The angle is the rotation angle from y-axis in image space to the height\n # vector (top->down in the box's local coordinate system) of the box in CCW.\n #\n # angle(new) = angle_yOx(O - F(new))\n # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )\n # = atan2(sfx * s * h / 2, sfy * c * h / 2)\n # = atan2(sfx * s, sfy * c)\n #\n # For example,\n # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)\n self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi\n\n @classmethod\n def cat(cls, boxes_list: List[\"RotatedBoxes\"]) -> \"RotatedBoxes\":\n \"\"\"\n Concatenates a list of RotatedBoxes into a single RotatedBoxes\n\n Arguments:\n boxes_list (list[RotatedBoxes])\n\n Returns:\n RotatedBoxes: the concatenated RotatedBoxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, RotatedBoxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (5,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" } ]
import contextlib
import datetime
import io
import json
import logging
import numpy as np
import os
import shutil
import pycocotools.mask as mask_util
import detectron2.data.datasets  # noqa # add pre-defined metadata
import sys
from fvcore.common.timer import Timer
from iopath.common.file_io import file_lock
from PIL import Image
from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
from .. import DatasetCatalog, MetadataCatalog
from pycocotools.coco import COCO
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
14,206
gt_files = sorted( (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=lambda file_path: file2id(gt_root, file_path), ) assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images if len(input_files) != len(gt_files): logger.warn( "Directory {} and {} has {} and {} files, respectively.".format( image_root, gt_root, len(input_files), len(gt_files) ) ) input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] intersect = list(set(input_basenames) & set(gt_basenames)) # sort, otherwise each worker may obtain a list[dict] in different order intersect = sorted(intersect) logger.warn("Will use their intersection of {} files.".format(len(intersect))) input_files = [os.path.join(image_root, f + image_ext) for f in intersect] gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] logger.info( "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) ) dataset_dicts = [] for (img_path, gt_path) in zip(input_files, gt_files): record = {} record["file_name"] = img_path record["sem_seg_file_name"] = gt_path dataset_dicts.append(record) return dataset_dicts def convert_to_coco_dict(dataset_name): """ Convert an instance detection/segmentation or keypoint detection dataset in detectron2's standard format into COCO json format. Generic dataset description can be found here: https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset COCO data format description can be found here: http://cocodataset.org/#format-data Args: dataset_name (str): name of the source dataset Must be registered in DatastCatalog and in detectron2's standard format. Must have corresponding metadata "thing_classes" Returns: coco_dict: serializable dict in COCO json format """ dataset_dicts = DatasetCatalog.get(dataset_name) metadata = MetadataCatalog.get(dataset_name) # unmap the category mapping ids for COCO if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa else: reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa categories = [ {"id": reverse_id_mapper(id), "name": name} for id, name in enumerate(metadata.thing_classes) ] logger.info("Converting dataset dicts into COCO format") coco_images = [] coco_annotations = [] for image_id, image_dict in enumerate(dataset_dicts): coco_image = { "id": image_dict.get("image_id", image_id), "width": int(image_dict["width"]), "height": int(image_dict["height"]), "file_name": str(image_dict["file_name"]), } coco_images.append(coco_image) anns_per_image = image_dict.get("annotations", []) for annotation in anns_per_image: # create a new dict with only COCO fields coco_annotation = {} # COCO requirement: XYWH box format for axis-align and XYWHA for rotated bbox = annotation["bbox"] if isinstance(bbox, np.ndarray): if bbox.ndim != 1: raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") bbox = bbox.tolist() if len(bbox) not in [4, 5]: raise ValueError(f"bbox has to has length 4 or 5. 
Got {bbox}.") from_bbox_mode = annotation["bbox_mode"] to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) # COCO requirement: instance area if "segmentation" in annotation: # Computing areas for instances by counting the pixels segmentation = annotation["segmentation"] # TODO: check segmentation type: RLE, BinaryMask or Polygon if isinstance(segmentation, list): polygons = PolygonMasks([segmentation]) area = polygons.area()[0].item() elif isinstance(segmentation, dict): # RLE area = mask_util.area(segmentation).item() else: raise TypeError(f"Unknown segmentation type {type(segmentation)}!") else: # Computing areas using bounding boxes if to_bbox_mode == BoxMode.XYWH_ABS: bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) area = Boxes([bbox_xy]).area()[0].item() else:
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". """ logger = logging.getLogger(__name__) __all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None, dataset_name_in_dict="coco"): """ Load a json file with COCO's instances annotation format. Currently supports instance detection, instance segmentation, and person keypoints annotations. Args: json_file (str): full path to the json file in COCO instances annotation format. image_root (str or path-like): the directory where the images in this json file exists. dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). When provided, this function will also do the following: * Put "thing_classes" into the metadata associated with this dataset. * Map the category ids into a contiguous range (needed by standard dataset format), and add "thing_dataset_id_to_contiguous_id" to the metadata associated with this dataset. This option should usually be provided, unless users need to load the original json content and apply more processing manually. extra_annotation_keys (list[str]): list of per-annotation keys that should also be loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", "category_id", "segmentation"). The values for these keys will be returned as-is. For example, the densepose annotations are loaded in this way. Returns: list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None. If `dataset_name` is None, the returned `category_ids` may be incontiguous and may not conform to the Detectron2 standard format. Notes: 1. This function does not read the image files. The results do not have the "image" field. """ timer = Timer() json_file = PathManager.get_local_path(json_file) with contextlib.redirect_stdout(io.StringIO()): coco_api = COCO(json_file) if timer.seconds() > 1: logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) id_map = None if dataset_name is not None: meta = MetadataCatalog.get(dataset_name) cat_ids = sorted(coco_api.getCatIds()) cats = coco_api.loadCats(cat_ids) # The categories in a custom json file may not be sorted. thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] meta.thing_classes = thing_classes # In COCO, certain category ids are artificially removed, # and by convention they are always ignored. # We deal with COCO's id issue and translate # the category ids to contiguous ids in [0, 80). # It works by looking at the "categories" field in the json, therefore # if users' own json also have incontiguous ids, we'll # apply this mapping as well but print a warning. if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): if "coco" not in dataset_name: logger.warning( """ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. 
""" ) id_map = {v: i for i, v in enumerate(cat_ids)} meta.thing_dataset_id_to_contiguous_id = id_map # sort indices for reproducible results img_ids = sorted(coco_api.imgs.keys()) # imgs is a list of dicts, each looks something like: # {'license': 4, # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', # 'file_name': 'COCO_val2014_000000001268.jpg', # 'height': 427, # 'width': 640, # 'date_captured': '2013-11-17 05:57:24', # 'id': 1268} imgs = coco_api.loadImgs(img_ids) # anns is a list[list[dict]], where each dict is an annotation # record for an object. The inner list enumerates the objects in an image # and the outer list enumerates over images. Example of anns[0]: # [{'segmentation': [[192.81, # 247.09, # ... # 219.03, # 249.06]], # 'area': 1035.749, # 'iscrowd': 0, # 'image_id': 1268, # 'bbox': [192.81, 224.8, 74.73, 33.43], # 'category_id': 16, # 'id': 42986}, # ...] anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] total_num_valid_anns = sum([len(x) for x in anns]) total_num_anns = len(coco_api.anns) if total_num_valid_anns < total_num_anns: logger.warning( f"{json_file} contains {total_num_anns} annotations, but only " f"{total_num_valid_anns} of them match to images in the file." ) if "minival" not in json_file: # The popular valminusminival & minival annotations for COCO2014 contain this bug. # However the ratio of buggy annotations there is tiny and does not affect accuracy. # Therefore we explicitly white-list them. ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( json_file ) imgs_anns = list(zip(imgs, anns)) logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) dataset_dicts = [] ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) num_instances_without_valid_segmentation = 0 for (img_dict, anno_dict_list) in imgs_anns: record = {} record["file_name"] = os.path.join(image_root, img_dict["file_name"]) record["height"] = img_dict["height"] record["width"] = img_dict["width"] image_id = record["image_id"] = img_dict["id"] objs = [] for anno in anno_dict_list: # Check that the image_id in this annotation is the same as # the image_id we're looking at. # This fails only when the data parsing logic or the annotation file is buggy. # The original COCO valminusminival2014 & minival2014 annotation files # actually contains bugs that, together with certain ways of using COCO API, # can trigger this assertion. assert anno["image_id"] == image_id assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' obj = {key: anno[key] for key in ann_keys if key in anno} if "bbox" in obj and len(obj["bbox"]) == 0: raise ValueError( f"One annotation of image {image_id} contains empty 'bbox' value! " "This json does not have valid COCO format." 
) segm = anno.get("segmentation", None) if segm: # either list[list[float]] or dict(RLE) if isinstance(segm, dict): if isinstance(segm["counts"], list): # convert to compressed RLE segm = mask_util.frPyObjects(segm, *segm["size"]) else: # filter out invalid polygons (< 3 points) segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] if len(segm) == 0: num_instances_without_valid_segmentation += 1 continue # ignore this instance obj["segmentation"] = segm keypts = anno.get("keypoints", None) if keypts: # list[int] for idx, v in enumerate(keypts): if idx % 3 != 2: # COCO's segmentation coordinates are floating points in [0, H or W], # but keypoint coordinates are integers in [0, H-1 or W-1] # Therefore we assume the coordinates are "pixel indices" and # add 0.5 to convert to floating point coordinates. keypts[idx] = v + 0.5 obj["keypoints"] = keypts obj["bbox_mode"] = BoxMode.XYWH_ABS if id_map: annotation_category_id = obj["category_id"] try: obj["category_id"] = id_map[annotation_category_id] except KeyError as e: raise KeyError( f"Encountered category_id={annotation_category_id} " "but this id does not exist in 'categories' of the json file." ) from e objs.append(obj) record["annotations"] = objs record["task"] = "detection" record["dataset_name"] = dataset_name_in_dict dataset_dicts.append(record) if num_instances_without_valid_segmentation > 0: logger.warning( "Filtered out {} instances without valid segmentation. ".format( num_instances_without_valid_segmentation ) + "There might be issues in your dataset generation process. Please " "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" ) return dataset_dicts def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): """ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are treated as ground truth annotations and all files under "image_root" with "image_ext" extension as input images. Ground truth and input images are matched using file paths relative to "gt_root" and "image_root" respectively without taking into account file extensions. This works for COCO as well as some other datasets. Args: gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation annotations are stored as images with integer values in pixels that represent corresponding semantic labels. image_root (str): the directory where the input images are. gt_ext (str): file extension for ground truth annotations. image_ext (str): file extension for input images. Returns: list[dict]: a list of dicts in detectron2 standard format without instance-level annotation. Notes: 1. This function does not read the image and ground truth files. The results do not have the "image" and "sem_seg" fields. """ # We match input images with ground truth based on their relative filepaths (without file # extensions) starting from 'image_root' and 'gt_root' respectively. 
def file2id(folder_path, file_path): # extract relative path starting from `folder_path` image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) # remove file extension image_id = os.path.splitext(image_id)[0] return image_id input_files = sorted( (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), key=lambda file_path: file2id(image_root, file_path), ) gt_files = sorted( (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=lambda file_path: file2id(gt_root, file_path), ) assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images if len(input_files) != len(gt_files): logger.warn( "Directory {} and {} has {} and {} files, respectively.".format( image_root, gt_root, len(input_files), len(gt_files) ) ) input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] intersect = list(set(input_basenames) & set(gt_basenames)) # sort, otherwise each worker may obtain a list[dict] in different order intersect = sorted(intersect) logger.warn("Will use their intersection of {} files.".format(len(intersect))) input_files = [os.path.join(image_root, f + image_ext) for f in intersect] gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] logger.info( "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) ) dataset_dicts = [] for (img_path, gt_path) in zip(input_files, gt_files): record = {} record["file_name"] = img_path record["sem_seg_file_name"] = gt_path dataset_dicts.append(record) return dataset_dicts def convert_to_coco_dict(dataset_name): """ Convert an instance detection/segmentation or keypoint detection dataset in detectron2's standard format into COCO json format. Generic dataset description can be found here: https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset COCO data format description can be found here: http://cocodataset.org/#format-data Args: dataset_name (str): name of the source dataset Must be registered in DatastCatalog and in detectron2's standard format. 
Must have corresponding metadata "thing_classes" Returns: coco_dict: serializable dict in COCO json format """ dataset_dicts = DatasetCatalog.get(dataset_name) metadata = MetadataCatalog.get(dataset_name) # unmap the category mapping ids for COCO if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa else: reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa categories = [ {"id": reverse_id_mapper(id), "name": name} for id, name in enumerate(metadata.thing_classes) ] logger.info("Converting dataset dicts into COCO format") coco_images = [] coco_annotations = [] for image_id, image_dict in enumerate(dataset_dicts): coco_image = { "id": image_dict.get("image_id", image_id), "width": int(image_dict["width"]), "height": int(image_dict["height"]), "file_name": str(image_dict["file_name"]), } coco_images.append(coco_image) anns_per_image = image_dict.get("annotations", []) for annotation in anns_per_image: # create a new dict with only COCO fields coco_annotation = {} # COCO requirement: XYWH box format for axis-align and XYWHA for rotated bbox = annotation["bbox"] if isinstance(bbox, np.ndarray): if bbox.ndim != 1: raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") bbox = bbox.tolist() if len(bbox) not in [4, 5]: raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.") from_bbox_mode = annotation["bbox_mode"] to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) # COCO requirement: instance area if "segmentation" in annotation: # Computing areas for instances by counting the pixels segmentation = annotation["segmentation"] # TODO: check segmentation type: RLE, BinaryMask or Polygon if isinstance(segmentation, list): polygons = PolygonMasks([segmentation]) area = polygons.area()[0].item() elif isinstance(segmentation, dict): # RLE area = mask_util.area(segmentation).item() else: raise TypeError(f"Unknown segmentation type {type(segmentation)}!") else: # Computing areas using bounding boxes if to_bbox_mode == BoxMode.XYWH_ABS: bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) area = Boxes([bbox_xy]).area()[0].item() else:
area = RotatedBoxes([bbox]).area()[0].item()
3
2023-12-22 13:31:33+00:00
16k
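Reading this row end to end: the gold snippet index 3 appears to select the RotatedBoxes entry (the fourth item of the context list, counting from zero), which is exactly the class the next_line uses to finish the area computation the crop leaves open. A minimal sketch of that relationship with hypothetical box values, assuming detectron2 is installed:

# Hypothetical values; a minimal sketch of the completion target in the row above,
# assuming detectron2 is installed and imported as shown in the import list.
from detectron2.structures import Boxes, BoxMode, RotatedBoxes

bbox = [50.0, 40.0, 20.0, 10.0, 30.0]                        # XYWHA_ABS: (xc, yc, w, h, angle)
area = RotatedBoxes([bbox]).area()[0].item()                 # the next_line branch: w * h = 200.0

bbox4 = [10.0, 10.0, 20.0, 10.0]                             # XYWH_ABS: (x, y, w, h)
bbox_xy = BoxMode.convert(bbox4, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
area4 = Boxes([bbox_xy]).area()[0].item()                    # the 4-element branch shown in the crop: 200.0

Both branches reduce to width times height here; the rotated case simply reads them from columns 2 and 3 of the Nx5 tensor, as the RotatedBoxes.area snippet in the context list shows.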
Con6924/SPM
evaluate_task.py
[ { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:" }, { "identifier": "RootConfig", "path": "src/configs/config.py", "snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None" }, { "identifier": "GenerationConfig", "path": "src/configs/generation_config.py", "snippet": "class GenerationConfig(BaseModel):\n prompts: list[str] = []\n negative_prompt: str = \"bad anatomy,watermark,extra digit,signature,worst quality,jpeg artifacts,normal quality,low quality,long neck,lowres,error,blurry,missing fingers,fewer digits,missing arms,text,cropped,Humpbacked,bad hands,username\"\n unconditional_prompt: str = \"\"\n width: int = 512\n height: int = 512\n num_inference_steps: int = 30\n guidance_scale: float = 7.5\n seed: int = 2024\n generate_num: int = 1\n\n save_path: str = None # can be a template, e.g. \"path/to/img_{}.png\",\n # then the generated images will be saved as \"path/to/img_0.png\", \"path/to/img_1.png\", ...\n\n def dict(self):\n results = {}\n for attr in vars(self):\n if not attr.startswith(\"_\"):\n results[attr] = getattr(self, attr)\n return results\n \n @staticmethod\n def fix_format(cfg):\n for k, v in cfg.items():\n if isinstance(v, list):\n cfg[k] = v[0]\n elif isinstance(v, torch.Tensor):\n cfg[k] = v.item()" }, { "identifier": "train_util", "path": "src/engine/train_util.py", "snippet": "UNET_IN_CHANNELS = 4 # Stable Diffusion の in_channels は 4 で固定。XLも同じ。\nVAE_SCALE_FACTOR = 8 # 2 ** (len(vae.config.block_out_channels) - 1) = 8\nUNET_ATTENTION_TIME_EMBED_DIM = 256 # XL\nTEXT_ENCODER_2_PROJECTION_DIM = 1280\nUNET_PROJECTION_CLASS_EMBEDDING_INPUT_DIM = 2816\ndef get_random_noise(\n batch_size: int, height: int, width: int, generator: torch.Generator = None\n) -> torch.Tensor:\ndef apply_noise_offset(latents: torch.FloatTensor, noise_offset: float):\ndef get_initial_latents(\n scheduler: SchedulerMixin,\n n_imgs: int,\n height: int,\n width: int,\n n_prompts: int,\n generator=None,\n) -> torch.Tensor:\ndef text_tokenize(\n tokenizer: CLIPTokenizer, # 普通ならひとつ、XLならふたつ!\n prompts: list[str],\n):\ndef text_encode(text_encoder: CLIPTextModel, tokens):\ndef encode_prompts(\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTokenizer,\n prompts: list[str],\n return_tokens: bool = False,\n):\ndef text_encode_xl(\n text_encoder: SDXL_TEXT_ENCODER_TYPE,\n tokens: torch.FloatTensor,\n num_images_per_prompt: int = 1,\n):\ndef encode_prompts_xl(\n tokenizers: list[CLIPTokenizer],\n text_encoders: list[SDXL_TEXT_ENCODER_TYPE],\n prompts: list[str],\n num_images_per_prompt: int = 1,\n) -> tuple[torch.FloatTensor, torch.FloatTensor]:\ndef concat_embeddings(\n unconditional: torch.FloatTensor,\n conditional: torch.FloatTensor,\n n_imgs: int,\n):\ndef predict_noise(\n unet: 
UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n guidance_scale=7.5,\n) -> torch.FloatTensor:\ndef diffusion(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: torch.FloatTensor,\n total_timesteps: int = 1000,\n start_timesteps=0,\n **kwargs,\n):\ndef rescale_noise_cfg(\n noise_cfg: torch.FloatTensor, noise_pred_text, guidance_rescale=0.0\n):\ndef predict_noise_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale=7.5,\n guidance_rescale=0.7,\n) -> torch.FloatTensor:\ndef diffusion_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: tuple[torch.FloatTensor, torch.FloatTensor],\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale: float = 1.0,\n total_timesteps: int = 1000,\n start_timesteps=0,\n):\ndef get_add_time_ids(\n height: int,\n width: int,\n dynamic_crops: bool = False,\n dtype: torch.dtype = torch.float32,\n):\ndef get_optimizer(config, trainable_params):\ndef get_scheduler_fix(config, optimizer: Optimizer, num_processes: int = 1):\n def wrap_check_needless_num_warmup_steps(return_vals):\ndef get_random_resolution_in_bucket(bucket_resolution: int = 512) -> tuple[int, int]:\ndef text2img(pipe: DiffusionPipeline,\n prompts: Union[str, list[str]], \n negative_prompt: Union[str, list[str]] = \"\", \n width: int = 512, \n height: int = 512,\n num_inference_steps: int = 30,\n guidance_scale: int = 7.5,\n seed: int = None,\n generate_num: int = 1,\n tag: str = \"\",\n **kwargs):\ndef latent2img(pipe: DiffusionPipeline,\n scheduler,\n noise_pred: torch.FloatTensor,\n latents: torch.FloatTensor,\n timestep: int,\n tag: str = \"ori\",\n **kwargs):" }, { "identifier": "model_util", "path": "src/models/model_util.py", "snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: 
str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:" }, { "identifier": "SPMLayer", "path": "src/models/spm.py", "snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )" }, { "identifier": "SPMNetwork", "path": "src/models/spm.py", "snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: 
{len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0" }, { "identifier": "load_state_dict", "path": "src/models/merge_spm.py", "snippet": "def load_state_dict(file_name, dtype):\n if os.path.splitext(file_name)[1] == \".safetensors\":\n sd = load_file(file_name)\n metadata = load_metadata_from_safetensors(file_name)\n else:\n sd = torch.load(file_name, map_location=\"cpu\")\n metadata = {}\n\n for key in list(sd.keys()):\n if type(sd[key]) == torch.Tensor:\n sd[key] = sd[key].to(dtype)\n\n return sd, metadata" }, { "identifier": "SLDPipeline", "path": "src/misc/sld_pipeline.py", "snippet": "class SLDPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for text-to-image generation using Safe Latent Diffusion.\n\n The implementation is based on the [`StableDiffusionPipeline`]\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. 
Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n ],\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n ):\n super().__init__()\n safety_concept: Optional[str] = 'hate, harassment, violence, suffering, humiliation, harm, suicide, ' \\\n 'sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, ' \\\n 'drug use, theft, vandalism, weapons, child abuse, brutality, cruelty'\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n if safety_checker is None:\n logger.warn(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self._safety_text_concept = safety_concept\n\n @property\n def safety_concept(self):\n r\"\"\"\n Getter method for the safety concept used with SLD\n\n Returns:\n `str`:\n The text describing the safety concept\n \"\"\"\n return self._safety_text_concept\n\n @safety_concept.setter\n def safety_concept(self, concept):\n r\"\"\"\n Setter method for the safety concept used with SLD\n\n Args:\n concept (`str`):\n The text of the new safety concept\n \"\"\"\n self._safety_text_concept = concept\n\n def enable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Enable memory efficient attention as implemented in xformers.\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference\n time. Speed up at training time is not guaranteed.\n\n Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention\n is used.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(True)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention as implemented in xformers.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(False)\n\n def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = \"auto\"):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,\n `attention_head_dim` must be a multiple of `slice_size`.\n \"\"\"\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = self.unet.config.attention_head_dim // 2\n self.unet.set_attention_slice(slice_size)\n\n def disable_attention_slicing(self):\n r\"\"\"\n Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go\n back to computing attention in one step.\n \"\"\"\n # set slice_size = `None` to disable `attention slicing`\n self.enable_attention_slicing(None)\n\n def enable_sequential_cpu_offload(self):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet,\n text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(\"cuda\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n height: int = 512,\n width: int = 512,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[torch.Generator] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n sld_guidance_scale: Optional[float] = 1000,\n sld_warmup_steps: Optional[int] = 10,\n sld_threshold: Optional[float] = 0.01,\n sld_momentum_scale: Optional[float] = 0.3,\n sld_mom_beta: Optional[float] = 0.4,\n **kwargs,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n height (`int`, *optional*, defaults to 512):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to 512):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator`, *optional*):\n A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation\n deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. 
Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n sld_guidance_scale (`float`, *optional*, defaults to 1000):\n The guidance scale of safe latent diffusion. If set to be less than 1, safety guidance will be disabled.\n sld_warmup_steps (`int`, *optional*, defaults to 10):\n Number of warmup steps for safety guidance. SLD will only be applied for diffusion steps greater\n than `sld_warmup_steps`.\n sld_threshold (`float`, *optional*, defaults to 0.01):\n Threshold that separates the hyperplane between appropriate and inappropriate images.\n sld_momentum_scale (`float`, *optional*, defaults to 0.3):\n Scale of the SLD momentum to be added to the safety guidance at each diffusion step.\n If set to 0.0 momentum will be disabled. Momentum is already built up during warmup,\n i.e. for diffusion steps smaller than `sld_warmup_steps`.\n sld_mom_beta (`float`, *optional*, defaults to 0.4):\n Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous\n momentum will be kept. Momentum is already built up during warmup, i.e. for diffusion steps smaller than\n `sld_warmup_steps`.\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n if isinstance(prompt, str):\n batch_size = 1\n elif isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n enable_safety_guidance = True\n if sld_guidance_scale < 1:\n enable_safety_guidance = False\n logger.warn('You have disabled safety guidance.')\n\n # get prompt text embeddings\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n\n if text_input_ids.shape[-1] > self.tokenizer.model_max_length:\n removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" 
{self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]\n text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # Encode the safety concept text\n if enable_safety_guidance:\n safety_concept_input = self.tokenizer(\n [self._safety_text_concept],\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0]\n\n # duplicate safety embeddings for each generation per prompt, using mps friendly method\n seq_len = safety_embeddings.shape[1]\n safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1)\n safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings, safety_embeddings])\n\n else:\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n # get the initial random noise unless the user supplied it\n\n # Unlike in other pipelines, latents need to be generated in the target device\n # for 1-to-1 results reproducibility with the CompVis implementation.\n # However this currently doesn't work in `mps`.\n latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)\n latents_dtype = 
text_embeddings.dtype\n if latents is None:\n if self.device.type == \"mps\":\n # randn does not work reproducibly on mps\n latents = torch.randn(latents_shape, generator=generator, device=\"cpu\", dtype=latents_dtype).to(\n self.device\n )\n else:\n latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)\n else:\n if latents.shape != latents_shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {latents_shape}\")\n latents = latents.to(self.device)\n\n # set timesteps\n self.scheduler.set_timesteps(num_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # Some schedulers like PNDM have timesteps as arrays\n # It's more optimized to move all timesteps to correct device beforehand\n timesteps_tensor = self.scheduler.timesteps.to(self.device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n\n safety_momentum = None\n\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * (3 if enable_safety_guidance else 2)) \\\n if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))\n noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]\n\n # default classifier free guidance\n noise_guidance = (noise_pred_text - noise_pred_uncond)\n\n # Perform SLD guidance\n if enable_safety_guidance:\n if safety_momentum is None:\n safety_momentum = torch.zeros_like(noise_guidance)\n noise_pred_safety_concept = noise_pred_out[2]\n\n # Equation 6\n scale = torch.clamp(\n torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.)\n\n # Equation 6\n safety_concept_scale = torch.where(\n (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,\n torch.zeros_like(scale), scale)\n\n # Equation 4\n noise_guidance_safety = torch.mul(\n (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)\n\n # Equation 7\n noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum\n\n # Equation 8\n safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety\n\n if i >= sld_warmup_steps: # Warmup\n # Equation 3\n noise_guidance = noise_guidance - noise_guidance_safety\n\n noise_pred = noise_pred_uncond + guidance_scale * 
noise_guidance\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents).sample\n\n image = (image / 2 + 0.5).clamp(0, 1)\n\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n\n if self.safety_checker is not None:\n safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors=\"pt\").to(\n self.device\n )\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)\n )\n else:\n has_nsfw_concept = None\n\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return SLDPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept,\n applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None)" } ]
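The SLDPipeline snippet above applies its safety guidance inside the denoising loop (the lines commented Equation 3–8). Below is a minimal, self-contained sketch of that per-step update on toy tensors; the default hyperparameters mirror the snippet's call signature, while the latent shape and the example step index are illustrative assumptions.

```python
# Sketch of the per-step SLD safety-guidance update from the snippet above,
# run on random tensors. Shapes and the step index are illustrative.
import torch

def sld_guidance_step(noise_pred_uncond, noise_pred_text, noise_pred_safety,
                      safety_momentum, step,
                      guidance_scale=7.5, sld_guidance_scale=1000.0,
                      sld_threshold=0.01, sld_momentum_scale=0.3,
                      sld_mom_beta=0.4, sld_warmup_steps=10):
    # standard classifier-free guidance direction
    noise_guidance = noise_pred_text - noise_pred_uncond

    # Eq. 6: element-wise scale, clamped to 1, zeroed where the text prediction
    # already points away from the safety concept
    diff = noise_pred_text - noise_pred_safety
    scale = torch.clamp(diff.abs() * sld_guidance_scale, max=1.0)
    safety_scale = torch.where(diff >= sld_threshold, torch.zeros_like(scale), scale)

    # Eq. 4 + Eq. 7: safety direction plus accumulated momentum
    noise_guidance_safety = (noise_pred_safety - noise_pred_uncond) * safety_scale
    noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum

    # Eq. 8: momentum update
    safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety

    # Eq. 3: only subtract the safety direction after the warmup phase
    if step >= sld_warmup_steps:
        noise_guidance = noise_guidance - noise_guidance_safety

    noise_pred = noise_pred_uncond + guidance_scale * noise_guidance
    return noise_pred, safety_momentum

# toy usage (shape is an assumption, not the pipeline's actual latent shape)
shape = (1, 4, 8, 8)
uncond, text, safety = torch.randn(shape), torch.randn(shape), torch.randn(shape)
momentum = torch.zeros(shape)
pred, momentum = sld_guidance_step(uncond, text, safety, momentum, step=12)
```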
import argparse import gc import warnings import torch from pathlib import Path from typing import Literal from torch.utils.data import DataLoader from accelerate import PartialState, Accelerator from src.configs import config from src.configs.config import RootConfig from src.configs.generation_config import GenerationConfig from src.engine import train_util from src.evaluation import * from src.models import model_util from src.models.spm import SPMLayer, SPMNetwork from src.models.merge_spm import load_state_dict from src.misc.sld_pipeline import SLDPipeline
11901
if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[ load_state_dict(spm_model_path, weight_dtype) for spm_model_path in spm_model_paths ] ) # check if SPMs are compatible assert all([metadata["rank"] == metadatas[0]["rank"] for metadata in metadatas]) # get the erased concept erased_prompts = [md["prompts"].split(",") for md in metadatas] erased_prompts_count = [len(ep) for ep in erased_prompts] print(f"Erased prompts: {erased_prompts}") erased_prompts_flatten = [item for sublist in erased_prompts for item in sublist]
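calculate_matching_score in the cropped code above mixes a few cheap similarity signals and keeps the per-prompt maximum. Its "tokenuni" branch is simply the fraction of an erased prompt's non-special tokens that also occur in the user prompt. A toy rendering of that branch follows; the token ids are made up, and the BOS/EOS ids are only assumed to be CLIP-style.

```python
# Toy illustration of the "tokenuni" metric from calculate_matching_score.
# All token ids below are invented for the example.
import torch

special_token_ids = {49406, 49407}                                  # assumed BOS/EOS ids
prompt_tokens = torch.tensor([49406, 320, 1125, 539, 1237, 49407])  # user prompt
erased_prompt_tokens = [
    torch.tensor([49406, 1237, 49407]),       # erased concept sharing one token
    torch.tensor([49406, 777, 888, 49407]),   # unrelated erased concept
]

prompt_set = set(prompt_tokens.tolist()) - special_token_ids
tokenuni = []
for ep in erased_prompt_tokens:
    ep_set = set(ep.tolist()) - special_token_ids
    tokenuni.append(len(prompt_set & ep_set) / len(ep_set))

print(tokenuni)  # -> [1.0, 0.0]
```

In the full function this list is stacked with the other enabled metrics (e.g. clipcos) and reduced with torch.max, so either signal can flag a prompt as close to an erased concept.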
DIFFUSERS_CACHE_DIR = ".cache/" UNET_NAME = "unet" TEXT_ENCODER_NAME = "text_encoder" MATCHING_METRICS = Literal[ "clipcos", "clipcos_tokenuni", "tokenuni", "allone", ] distributed_state = PartialState() accelerator = Accelerator() def flush(): torch.cuda.empty_cache() gc.collect() def parse_extra_args(extra_args): if extra_args is None or extra_args == ['']: return {} extra_args_dict = {} for extra_arg in extra_args: key, value = extra_arg.split("=") # convert value to various types if value.isdigit(): value = int(value) elif value.replace(".", "", 1).isdigit(): value = float(value) elif value[0] == "[" and value[-1] == "]": value = [i.replace('+', ' ') for i in value[1:-1].split(",")] value = [v.strip() for v in value] if value[0].isdigit(): value = [int(v) for v in value] elif value[0].replace(".", "", 1).isdigit(): value = [float(v) for v in value] extra_args_dict[key] = value return extra_args_dict def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path 
is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[ load_state_dict(spm_model_path, weight_dtype) for spm_model_path in spm_model_paths ] ) # check if SPMs are compatible assert all([metadata["rank"] == metadatas[0]["rank"] for metadata in metadatas]) # get the erased concept erased_prompts = [md["prompts"].split(",") for md in metadatas] erased_prompts_count = [len(ep) for ep in erased_prompts] print(f"Erased prompts: {erased_prompts}") erased_prompts_flatten = [item for sublist in erased_prompts for item in sublist]
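A quick usage note on the listing above: parse_extra_args coerces the args.task_args / args.generation_cfg extras from key=value strings into typed Python values, with '+' standing in for spaces inside bracketed lists. Assuming that function from the listing is in scope, a call like the following (arguments invented purely for illustration) behaves as sketched in the comments:

```python
# Illustrative only: the key=value strings are invented, and parse_extra_args
# is assumed to be the helper defined in the listing above.
extras = parse_extra_args([
    "num_images_per_prompt=4",           # digits            -> int
    "guidance_scale=7.5",                # digits + one dot  -> float
    "prompts=[a+photo+of+a+cat,a+dog]",  # bracketed, '+'    -> list of strings
])
# extras == {'num_images_per_prompt': 4,
#            'guidance_scale': 7.5,
#            'prompts': ['a photo of a cat', 'a dog']}
```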
erased_prompt_embeds, erased_prompt_tokens = train_util.encode_prompts(
erased_prompt_embeds, erased_prompt_tokens = train_util.encode_prompts(
3
2023-12-26 03:19:16+00:00
16k
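The SPM modules this record is built around (see the SPMLayer snippet near the top of its context) wrap a Linear or Conv2d and add a LoRA-style low-rank residual to its output. The following is a minimal sketch of that update for the Linear case; the class name, dimensions, rank, and multiplier are illustrative, not taken from the record.

```python
# Minimal sketch of the LoRA-style residual an SPMLayer adds on top of a
# wrapped nn.Linear. Names and sizes here are invented for illustration.
import math
import torch
import torch.nn as nn

class LowRankResidualLinear(nn.Module):
    def __init__(self, org_linear: nn.Linear, rank: int = 4,
                 alpha: float = 1.0, multiplier: float = 1.0):
        super().__init__()
        self.org = org_linear
        self.down = nn.Linear(org_linear.in_features, rank, bias=False)
        self.up = nn.Linear(rank, org_linear.out_features, bias=False)
        nn.init.kaiming_uniform_(self.down.weight, a=math.sqrt(5))
        nn.init.zeros_(self.up.weight)        # zero init => starts as an exact no-op
        self.scale = alpha / rank
        self.multiplier = multiplier

    def forward(self, x):
        # original output + scaled low-rank correction
        return self.org(x) + self.up(self.down(x)) * self.multiplier * self.scale

layer = LowRankResidualLinear(nn.Linear(320, 320), rank=4)
x = torch.randn(2, 77, 320)
assert torch.allclose(layer(x), layer.org(x))  # identical while up-projection is zero
```

Because the up-projection starts at zero, enabling such a layer is initially a no-op, which matches how the SPMLayer snippet initialises lora_up.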
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n init: str = \"default\",\n init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n init:\n The initializer to use. Choose from:\n\n \"default\": LeCun fan-in truncated normal initialization\n \"relu\": He initialization w/ truncated normal distribution\n \"glorot\": Fan-average Glorot uniform initialization\n \"gating\": Weights=0, Bias=1\n \"normal\": Normal initialization with std=1/sqrt(fan_in)\n \"final\": Weights=0, Bias=0\n\n Overridden by init_fn if the latter is not None.\n init_fn:\n A custom initializer taking weight and bias as inputs.\n Overrides init if not None.\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)\n\n with torch.no_grad():\n if init_fn is not None:\n init_fn(self.weight, self.bias)\n else:\n if init == \"default\":\n lecun_normal_init_(self.weight)\n elif init == \"relu\":\n he_normal_init_(self.weight)\n elif init == \"glorot\":\n glorot_uniform_init_(self.weight)\n elif init == \"gating\":\n gating_init_(self.weight)\n if bias:\n self.bias.fill_(1.0)\n elif init == \"normal\":\n normal_init_(self.weight)\n elif init == \"final\":\n final_init_(self.weight)\n else:\n raise ValueError(\"Invalid init string.\")" }, { "identifier": "LayerNorm", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "ipa_point_weights_init_", "path": "frame2seq/openfold/model/primitives.py", "snippet": "def ipa_point_weights_init_(weights):\n with torch.no_grad():\n softplus_inverse_1 = 0.541324854612918\n weights.fill_(softplus_inverse_1)" }, { "identifier": "restype_rigid_group_default_frame", "path": "frame2seq/openfold/np/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, 
bond_length_tolerance_factor=15\n):\ndef _make_atom14_ambiguity_feats():\ndef aatype_to_str_sequence(aatype):\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "frames_and_literature_positions_to_atom14_pos", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def frames_and_literature_positions_to_atom14_pos(\n r: Rigid,\n aatype: torch.Tensor,\n default_frames,\n group_idx,\n atom_mask,\n lit_positions,\n):\n # [*, N, 14, 4, 4]\n default_4x4 = default_frames[aatype, ...]\n\n # [*, N, 14]\n group_mask = group_idx[aatype, ...]\n\n # [*, N, 14, 8]\n group_mask = nn.functional.one_hot(\n group_mask,\n num_classes=default_frames.shape[-3],\n )\n\n # [*, N, 14, 8]\n t_atoms_to_global = r[..., None, :] * group_mask\n\n # [*, N, 14]\n t_atoms_to_global = t_atoms_to_global.map_tensor_fn(\n lambda x: torch.sum(x, dim=-1)\n )\n\n # [*, N, 14, 1]\n atom_mask = atom_mask[aatype, ...].unsqueeze(-1)\n\n # [*, N, 14, 3]\n lit_positions = lit_positions[aatype, ...]\n pred_positions = t_atoms_to_global.apply(lit_positions)\n pred_positions = pred_positions * atom_mask\n\n return pred_positions" }, { "identifier": "torsion_angles_to_frames", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def torsion_angles_to_frames(\n r: Rigid,\n alpha: torch.Tensor,\n aatype: torch.Tensor,\n rrgdf: torch.Tensor,\n):\n # [*, N, 8, 4, 4]\n default_4x4 = rrgdf[aatype, ...]\n\n # [*, N, 8] transformations, i.e.\n # One [*, N, 8, 3, 3] rotation matrix and\n # One [*, N, 8, 3] translation matrix\n default_r = r.from_tensor_4x4(default_4x4)\n\n bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))\n bb_rot[..., 1] = 1\n\n # [*, N, 8, 2]\n alpha = torch.cat(\n [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2\n )\n\n # [*, N, 8, 3, 3]\n # Produces rotation matrices of the form:\n # [\n # [1, 0 , 0 ],\n # [0, a_2,-a_1],\n # [0, a_1, a_2]\n # ]\n # This follows the original code rather than the supplement, which uses\n # different indices.\n\n all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)\n all_rots[..., 0, 0] = 1\n all_rots[..., 1, 1] = alpha[..., 1]\n all_rots[..., 1, 2] = -alpha[..., 0]\n all_rots[..., 2, 1:] = alpha\n\n all_rots = Rigid(Rotation(rot_mats=all_rots), None)\n\n all_frames = default_r.compose(all_rots)\n\n chi2_frame_to_frame = all_frames[..., 5]\n chi3_frame_to_frame = all_frames[..., 6]\n chi4_frame_to_frame = all_frames[..., 7]\n\n chi1_frame_to_bb = all_frames[..., 4]\n chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)\n chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)\n chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)\n\n all_frames_to_bb = 
Rigid.cat(\n [\n all_frames[..., :5],\n chi2_frame_to_bb.unsqueeze(-1),\n chi3_frame_to_bb.unsqueeze(-1),\n chi4_frame_to_bb.unsqueeze(-1),\n ],\n dim=-1,\n )\n\n all_frames_to_global = r[..., None].compose(all_frames_to_bb)\n\n return all_frames_to_global" }, { "identifier": "is_fp16_enabled", "path": "frame2seq/openfold/utils/precision_utils.py", "snippet": "def is_fp16_enabled():\n # Autocast world\n try:\n fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16\n fp16_enabled = fp16_enabled and torch.is_autocast_enabled()\n except AttributeError:\n fp16_enabled = False\n\n return fp16_enabled" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" } ]
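A minimal usage sketch of the Rigid/Rotation API documented in the context snippets above, assuming the frame2seq.openfold package named in this sample's import_statement is importable (illustrative shapes only, not part of the recorded repository code):

import torch
from frame2seq.openfold.utils.rigid_utils import Rigid, Rotation

# Identity frames over a [batch=2, N_res=5] grid of residues.
frames = Rigid.identity((2, 5), requires_grad=False)

# Applying the identity transformation leaves a [*, 3] point cloud unchanged.
pts = torch.randn(2, 5, 3)
assert torch.allclose(frames.apply(pts), pts, atol=1e-6)

# Round-trip through the homogeneous [*, 4, 4] representation.
frames_rt = Rigid.from_tensor_4x4(frames.to_tensor_4x4())

# Compose with an explicit rotation-matrix frame translated by one unit.
rot = Rotation(rot_mats=torch.eye(3).expand(2, 5, 3, 3), quats=None)
composed = frames.compose(Rigid(rot, torch.ones(2, 5, 3)))
assert composed.shape == (2, 5)

The same identity constructor is what the StructureModule in this row uses to seed its per-residue backbone frames before the IPA blocks update them.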
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
14,160
self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.ipa_dropout = nn.Dropout(self.dropout_rate) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, self.dropout_rate, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) def forward( self, evoformer_output_dict, aatype, mask=None, inplace_safe=False, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if(_offload_inference): assert(sys.getrefcount(evoformer_output_dict["pair"]) == 2) evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.no_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, inplace_safe=inplace_safe, _offload_inference=_offload_inference, _z_reference_list=z_reference_list ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation( rot_mats=rigids.get_rots().get_rot_mats(), quats=None ), rigids.get_trans(), ) backb_to_global = backb_to_global.scale_translation( self.trans_scale_factor ) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial)
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate) self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.dropout(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, dropout_rate, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, epsilon, inf, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA dropout_rate: Dropout rate used throughout the layer no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 
23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.dropout_rate = dropout_rate self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = epsilon self.inf = inf # Buffers to be lazily initialized later # self.default_frames # self.group_idx # self.atom_mask # self.lit_positions self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.ipa_dropout = nn.Dropout(self.dropout_rate) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, self.dropout_rate, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) def forward( self, evoformer_output_dict, aatype, mask=None, inplace_safe=False, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if(_offload_inference): assert(sys.getrefcount(evoformer_output_dict["pair"]) == 2) evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.no_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, inplace_safe=inplace_safe, _offload_inference=_offload_inference, _z_reference_list=z_reference_list ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation( rot_mats=rigids.get_rots().get_rot_mats(), quats=None ), rigids.get_trans(), ) backb_to_global = backb_to_global.scale_translation( self.trans_scale_factor ) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial)
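The IPA forward pass above masks attention both with the sequence mask and, when attn_drop_rate is positive, with the block labeled "Frame2seq implementation of IPA regularization via attention dropout". A small standalone sketch of that additive-masking trick in plain PyTorch (toy shapes, no frame2seq imports needed):

import torch

inf = 1e5                      # plays the role of self.inf in InvariantPointAttention
attn_drop_rate = 0.1
n_res, n_heads = 8, 4

mask = torch.ones(1, n_res)                                # [*, N_res], every residue valid
square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)      # [*, N_res, N_res]
square_mask = inf * (square_mask - 1)                      # 0 where valid, -inf-like where masked

# Randomly knock out a fraction of residue pairs before the softmax.
random_square_mask = torch.rand(square_mask.shape)
random_square_mask = inf * -1 * (random_square_mask < attn_drop_rate)
square_mask = square_mask + random_square_mask

logits = torch.randn(1, n_heads, n_res, n_res) + square_mask.unsqueeze(-3)
weights = logits.softmax(dim=-1)                           # dropped pairs receive ~0 weight

Because the dropout is applied as a large negative bias before the softmax rather than by zeroing weights afterwards, each attention row still renormalizes over the surviving residue pairs.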
all_frames_to_global = self.torsion_angles_to_frames(
5
2023-12-25 09:29:36+00:00
16k
KyanChen/TTP
mmpretrain/models/multimodal/clip/clip.py
[ { "identifier": "CIFAR100_CATEGORIES", "path": "mmpretrain/datasets/categories.py", "snippet": "CIFAR100_CATEGORIES = (\n 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',\n 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',\n 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock',\n 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur',\n 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster',\n 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion',\n 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain',\n 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree',\n 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy',\n 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket',\n 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail',\n 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper',\n 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train',\n 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf',\n 'woman', 'worm')" }, { "identifier": "IMAGENET_SIMPLE_CATEGORIES", "path": "mmpretrain/datasets/categories.py", "snippet": "IMAGENET_SIMPLE_CATEGORIES = (\n 'tench', 'goldfish', 'great white shark', 'tiger shark',\n 'hammerhead shark', 'electric ray', 'stingray', 'rooster', 'hen',\n 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco',\n 'indigo bunting', 'American robin', 'bulbul', 'jay', 'magpie', 'chickadee',\n 'American dipper', 'kite (bird of prey)', 'bald eagle', 'vulture',\n 'great grey owl', 'fire salamander', 'smooth newt', 'newt',\n 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog',\n 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle',\n 'mud turtle', 'terrapin', 'box turtle', 'banded gecko', 'green iguana',\n 'Carolina anole', 'desert grassland whiptail lizard', 'agama',\n 'frilled-necked lizard', 'alligator lizard', 'Gila monster',\n 'European green lizard', 'chameleon', 'Komodo dragon', 'Nile crocodile',\n 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake',\n 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake',\n 'garter snake', 'water snake', 'vine snake', 'night snake',\n 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba',\n 'sea snake', 'Saharan horned viper', 'eastern diamondback rattlesnake',\n 'sidewinder rattlesnake', 'trilobite', 'harvestman', 'scorpion',\n 'yellow garden spider', 'barn spider', 'European garden spider',\n 'southern black widow', 'tarantula', 'wolf spider', 'tick', 'centipede',\n 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peafowl',\n 'quail', 'partridge', 'african grey parrot', 'macaw',\n 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', 'hornbill',\n 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser',\n 'goose', 'black swan', 'tusker', 'echidna', 'platypus', 'wallaby', 'koala',\n 'wombat', 'jellyfish', 'sea anemone', 'brain coral', 'flatworm',\n 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton',\n 'chambered nautilus', 'Dungeness crab', 'rock crab', 'fiddler crab',\n 'red king crab', 'American lobster', 'spiny lobster', 'crayfish',\n 'hermit crab', 'isopod', 'white stork', 'black stork', 'spoonbill',\n 'flamingo', 'little blue heron', 'great egret', 'bittern bird',\n 'crane bird', 'limpkin', 'common gallinule', 'American coot', 'bustard',\n 'ruddy turnstone', 
'dunlin', 'common redshank', 'dowitcher',\n 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale',\n 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin',\n 'Maltese', 'Pekingese', 'Shih Tzu', 'King Charles Spaniel', 'Papillon',\n 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound',\n 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound',\n 'Treeing Walker Coonhound', 'English foxhound', 'Redbone Coonhound',\n 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet',\n 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki',\n 'Scottish Deerhound', 'Weimaraner', 'Staffordshire Bull Terrier',\n 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier',\n 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier',\n 'Norwich Terrier', 'Yorkshire Terrier', 'Wire Fox Terrier',\n 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier',\n 'Cairn Terrier', 'Australian Terrier', 'Dandie Dinmont Terrier',\n 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer',\n 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier',\n 'Australian Silky Terrier', 'Soft-coated Wheaten Terrier',\n 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever',\n 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever',\n 'Chesapeake Bay Retriever', 'German Shorthaired Pointer', 'Vizsla',\n 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany dog',\n 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel',\n 'Cocker Spaniel', 'Sussex Spaniel', 'Irish Water Spaniel', 'Kuvasz',\n 'Schipperke', 'Groenendael dog', 'Malinois', 'Briard', 'Australian Kelpie',\n 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie',\n 'Border Collie', 'Bouvier des Flandres dog', 'Rottweiler',\n 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher',\n 'Greater Swiss Mountain Dog', 'Bernese Mountain Dog',\n 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff',\n 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky',\n 'Alaskan Malamute', 'Siberian Husky', 'Dalmatian', 'Affenpinscher',\n 'Basenji', 'pug', 'Leonberger', 'Newfoundland dog', 'Great Pyrenees dog',\n 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'brussels griffon',\n 'Pembroke Welsh Corgi', 'Cardigan Welsh Corgi', 'Toy Poodle',\n 'Miniature Poodle', 'Standard Poodle',\n 'Mexican hairless dog (xoloitzcuintli)', 'grey wolf',\n 'Alaskan tundra wolf', 'red wolf or maned wolf', 'coyote', 'dingo',\n 'dhole', 'African wild dog', 'hyena', 'red fox', 'kit fox', 'Arctic fox',\n 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat',\n 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar',\n 'lion', 'tiger', 'cheetah', 'brown bear', 'American black bear',\n 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle',\n 'ladybug', 'ground beetle', 'longhorn beetle', 'leaf beetle',\n 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', 'ant',\n 'grasshopper', 'cricket insect', 'stick insect', 'cockroach',\n 'praying mantis', 'cicada', 'leafhopper', 'lacewing', 'dragonfly',\n 'damselfly', 'red admiral butterfly', 'ringlet butterfly',\n 'monarch butterfly', 'small white butterfly', 'sulphur butterfly',\n 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber',\n 'cottontail rabbit', 'hare', 'Angora rabbit', 'hamster', 'porcupine',\n 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel horse',\n 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox',\n 'water buffalo', 'bison', 'ram (adult male sheep)', 'bighorn sheep',\n 'Alpine ibex', 'hartebeest', 'impala (antelope)', 'gazelle',\n 'arabian camel', 'llama', 'weasel', 'mink', 'European polecat',\n 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo',\n 'three-toed sloth', 'orangutan', 'gorilla', 'chimpanzee', 'gibbon',\n 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur',\n 'black-and-white colobus', 'proboscis monkey', 'marmoset',\n 'white-headed capuchin', 'howler monkey', 'titi monkey',\n \"Geoffroy's spider monkey\", 'common squirrel monkey', 'ring-tailed lemur',\n 'indri', 'Asian elephant', 'African bush elephant', 'red panda',\n 'giant panda', 'snoek fish', 'eel', 'silver salmon', 'rock beauty fish',\n 'clownfish', 'sturgeon', 'gar fish', 'lionfish', 'pufferfish', 'abacus',\n 'abaya', 'academic gown', 'accordion', 'acoustic guitar',\n 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance',\n 'amphibious vehicle', 'analog clock', 'apiary', 'apron', 'trash can',\n 'assault rifle', 'backpack', 'bakery', 'balance beam', 'balloon',\n 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster / handrail', 'barbell',\n 'barber chair', 'barbershop', 'barn', 'barometer', 'barrel', 'wheelbarrow',\n 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap',\n 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker',\n 'military hat (bearskin or shako)', 'beer bottle', 'beer glass',\n 'bell tower', 'baby bib', 'tandem bicycle', 'bikini', 'ring binder',\n 'binoculars', 'birdhouse', 'boathouse', 'bobsleigh', 'bolo tie',\n 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'hunting bow',\n 'bow tie', 'brass memorial plaque', 'bra', 'breakwater', 'breastplate',\n 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train',\n 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe',\n 'can opener', 'cardigan', 'car mirror', 'carousel', 'tool kit',\n 'cardboard box / carton', 'car wheel', 'automated teller machine',\n 'cassette', 'cassette player', 'castle', 
'catamaran', 'CD player', 'cello',\n 'mobile phone', 'chain', 'chain-link fence', 'chain mail', 'chainsaw',\n 'storage chest', 'chiffonier', 'bell or wind chime', 'china cabinet',\n 'Christmas stocking', 'church', 'movie theater', 'cleaver',\n 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug',\n 'coffeemaker', 'spiral or coil', 'combination lock', 'computer keyboard',\n 'candy store', 'container ship', 'convertible', 'corkscrew', 'cornet',\n 'cowboy boot', 'cowboy hat', 'cradle', 'construction crane',\n 'crash helmet', 'crate', 'infant bed', 'Crock Pot', 'croquet ball',\n 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer',\n 'rotary dial telephone', 'diaper', 'digital clock', 'digital watch',\n 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock',\n 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick',\n 'dumbbell', 'Dutch oven', 'electric fan', 'electric guitar',\n 'electric locomotive', 'entertainment center', 'envelope',\n 'espresso machine', 'face powder', 'feather boa', 'filing cabinet',\n 'fireboat', 'fire truck', 'fire screen', 'flagpole', 'flute',\n 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen',\n 'four-poster bed', 'freight car', 'French horn', 'frying pan', 'fur coat',\n 'garbage truck', 'gas mask or respirator', 'gas pump', 'goblet', 'go-kart',\n 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano',\n 'greenhouse', 'radiator grille', 'grocery store', 'guillotine',\n 'hair clip', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer',\n 'hand-held computer', 'handkerchief', 'hard disk drive', 'harmonica',\n 'harp', 'combine harvester', 'hatchet', 'holster', 'home theater',\n 'honeycomb', 'hook', 'hoop skirt', 'gymnastic horizontal bar',\n 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron',\n 'carved pumpkin', 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'rickshaw',\n 'joystick', 'kimono', 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade',\n 'laptop computer', 'lawn mower', 'lens cap', 'letter opener', 'library',\n 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick',\n 'slip-on shoe', 'lotion', 'music speaker', 'loupe magnifying glass',\n 'sawmill', 'magnetic compass', 'messenger bag', 'mailbox', 'tights',\n 'one-piece bathing suit', 'manhole cover', 'maraca', 'marimba', 'mask',\n 'matchstick', 'maypole', 'maze', 'measuring cup', 'medicine cabinet',\n 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can',\n 'minibus', 'miniskirt', 'minivan', 'missile', 'mitten', 'mixing bowl',\n 'mobile home', 'ford model t', 'modem', 'monastery', 'monitor', 'moped',\n 'mortar and pestle', 'graduation cap', 'mosque', 'mosquito net', 'vespa',\n 'mountain bike', 'tent', 'computer mouse', 'mousetrap', 'moving van',\n 'muzzle', 'metal nail', 'neck brace', 'necklace', 'baby pacifier',\n 'notebook computer', 'obelisk', 'oboe', 'ocarina', 'odometer',\n 'oil filter', 'pipe organ', 'oscilloscope', 'overskirt', 'bullock cart',\n 'oxygen mask', 'product packet / packaging', 'paddle', 'paddle wheel',\n 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', 'paper towel',\n 'parachute', 'parallel bars', 'park bench', 'parking meter',\n 'railroad car', 'patio', 'payphone', 'pedestal', 'pencil case',\n 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum',\n 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank',\n 'pill bottle', 'pillow', 'ping-pong ball', 'pinwheel', 'pirate ship',\n 'drink pitcher', 'block plane', 'planetarium', 'plastic 
bag', 'plate rack',\n 'farm plow', 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho',\n 'pool table', 'soda bottle', 'plant pot', \"potter's wheel\", 'power drill',\n 'prayer rug', 'printer', 'prison', 'missile', 'projector', 'hockey puck',\n 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket',\n 'radiator', 'radio', 'radio telescope', 'rain barrel',\n 'recreational vehicle', 'fishing casting reel', 'reflex camera',\n 'refrigerator', 'remote control', 'restaurant', 'revolver', 'rifle',\n 'rocking chair', 'rotisserie', 'eraser', 'rugby ball',\n 'ruler measuring stick', 'sneaker', 'safe', 'safety pin', 'salt shaker',\n 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale',\n 'school bus', 'schooner', 'scoreboard', 'CRT monitor', 'screw',\n 'screwdriver', 'seat belt', 'sewing machine', 'shield', 'shoe store',\n 'shoji screen / room divider', 'shopping basket', 'shopping cart',\n 'shovel', 'shower cap', 'shower curtain', 'ski', 'balaclava ski mask',\n 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel',\n 'snowmobile', 'snowplow', 'soap dispenser', 'soccer ball', 'sock',\n 'solar thermal collector', 'sombrero', 'soup bowl', 'keyboard space bar',\n 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web',\n 'spindle', 'sports car', 'spotlight', 'stage', 'steam locomotive',\n 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall',\n 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa',\n 'submarine', 'suit', 'sundial', 'sunglasses', 'sunglasses', 'sunscreen',\n 'suspension bridge', 'mop', 'sweatshirt', 'swim trunks / shorts', 'swing',\n 'electrical switch', 'syringe', 'table lamp', 'tank', 'tape player',\n 'teapot', 'teddy bear', 'television', 'tennis ball', 'thatched roof',\n 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof',\n 'toaster', 'tobacco shop', 'toilet seat', 'torch', 'totem pole',\n 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', 'tray',\n 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch',\n 'trolleybus', 'trombone', 'hot tub', 'turnstile', 'typewriter keyboard',\n 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase',\n 'vaulted or arched ceiling', 'velvet fabric', 'vending machine',\n 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock',\n 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine',\n 'water bottle', 'water jug', 'water tower', 'whiskey jug', 'whistle',\n 'hair wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle',\n 'airplane wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence',\n 'shipwreck', 'sailboat', 'yurt', 'website', 'comic book', 'crossword',\n 'traffic or street sign', 'traffic light', 'dust jacket', 'menu', 'plate',\n 'guacamole', 'consomme', 'hot pot', 'trifle', 'ice cream', 'popsicle',\n 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog',\n 'mashed potatoes', 'cabbage', 'broccoli', 'cauliflower', 'zucchini',\n 'spaghetti squash', 'acorn squash', 'butternut squash', 'cucumber',\n 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith apple',\n 'strawberry', 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit',\n 'cherimoya (custard apple)', 'pomegranate', 'hay', 'carbonara',\n 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito',\n 'red wine', 'espresso', 'tea cup', 'eggnog', 'mountain', 'bubble', 'cliff',\n 'coral reef', 'geyser', 'lakeshore', 'promontory', 'sandbar', 'beach',\n 'valley', 'volcano', 
'baseball player', 'bridegroom', 'scuba diver',\n 'rapeseed', 'daisy', \"yellow lady's slipper\", 'corn', 'acorn', 'rose hip',\n 'horse chestnut seed', 'coral fungus', 'agaric', 'gyromitra',\n 'stinkhorn mushroom', 'earth star fungus', 'hen of the woods mushroom',\n 'bolete', 'corn cob', 'toilet paper')" }, { "identifier": "MODELS", "path": "mmpretrain/registry.py", "snippet": "MODELS = Registry(\n 'model',\n parent=MMENGINE_MODELS,\n locations=['mmpretrain.models'],\n)" }, { "identifier": "TOKENIZER", "path": "mmpretrain/registry.py", "snippet": "TOKENIZER = Registry(\n 'tokenizer',\n locations=['mmpretrain.models'],\n)" }, { "identifier": "DataSample", "path": "mmpretrain/structures/data_sample.py", "snippet": "class DataSample(BaseDataElement):\n \"\"\"A general data structure interface.\n\n It's used as the interface between different components.\n\n The following fields are convention names in MMPretrain, and we will set or\n get these fields in data transforms, models, and metrics if needed. You can\n also set any new fields for your need.\n\n Meta fields:\n img_shape (Tuple): The shape of the corresponding input image.\n ori_shape (Tuple): The original shape of the corresponding image.\n sample_idx (int): The index of the sample in the dataset.\n num_classes (int): The number of all categories.\n\n Data fields:\n gt_label (tensor): The ground truth label.\n gt_score (tensor): The ground truth score.\n pred_label (tensor): The predicted label.\n pred_score (tensor): The predicted score.\n mask (tensor): The mask used in masked image modeling.\n\n Examples:\n >>> import torch\n >>> from mmpretrain.structures import DataSample\n >>>\n >>> img_meta = dict(img_shape=(960, 720), num_classes=5)\n >>> data_sample = DataSample(metainfo=img_meta)\n >>> data_sample.set_gt_label(3)\n >>> print(data_sample)\n <DataSample(\n META INFORMATION\n num_classes: 5\n img_shape: (960, 720)\n DATA FIELDS\n gt_label: tensor([3])\n ) at 0x7ff64c1c1d30>\n >>>\n >>> # For multi-label data\n >>> data_sample = DataSample().set_gt_label([0, 1, 4])\n >>> print(data_sample)\n <DataSample(\n DATA FIELDS\n gt_label: tensor([0, 1, 4])\n ) at 0x7ff5b490e100>\n >>>\n >>> # Set one-hot format score\n >>> data_sample = DataSample().set_pred_score([0.1, 0.1, 0.6, 0.1])\n >>> print(data_sample)\n <DataSample(\n META INFORMATION\n num_classes: 4\n DATA FIELDS\n pred_score: tensor([0.1000, 0.1000, 0.6000, 0.1000])\n ) at 0x7ff5b48ef6a0>\n >>>\n >>> # Set custom field\n >>> data_sample = DataSample()\n >>> data_sample.my_field = [1, 2, 3]\n >>> print(data_sample)\n <DataSample(\n DATA FIELDS\n my_field: [1, 2, 3]\n ) at 0x7f8e9603d3a0>\n >>> print(data_sample.my_field)\n [1, 2, 3]\n \"\"\"\n\n def set_gt_label(self, value: LABEL_TYPE) -> 'DataSample':\n \"\"\"Set ``gt_label``.\"\"\"\n self.set_field(format_label(value), 'gt_label', dtype=torch.Tensor)\n return self\n\n def set_gt_score(self, value: SCORE_TYPE) -> 'DataSample':\n \"\"\"Set ``gt_score``.\"\"\"\n score = format_score(value)\n self.set_field(score, 'gt_score', dtype=torch.Tensor)\n if hasattr(self, 'num_classes'):\n assert len(score) == self.num_classes, \\\n f'The length of score {len(score)} should be '\\\n f'equal to the num_classes {self.num_classes}.'\n else:\n self.set_field(\n name='num_classes', value=len(score), field_type='metainfo')\n return self\n\n def set_pred_label(self, value: LABEL_TYPE) -> 'DataSample':\n \"\"\"Set ``pred_label``.\"\"\"\n self.set_field(format_label(value), 'pred_label', dtype=torch.Tensor)\n return self\n\n def 
set_pred_score(self, value: SCORE_TYPE):\n \"\"\"Set ``pred_label``.\"\"\"\n score = format_score(value)\n self.set_field(score, 'pred_score', dtype=torch.Tensor)\n if hasattr(self, 'num_classes'):\n assert len(score) == self.num_classes, \\\n f'The length of score {len(score)} should be '\\\n f'equal to the num_classes {self.num_classes}.'\n else:\n self.set_field(\n name='num_classes', value=len(score), field_type='metainfo')\n return self\n\n def set_mask(self, value: Union[torch.Tensor, np.ndarray]):\n if isinstance(value, np.ndarray):\n value = torch.from_numpy(value)\n elif not isinstance(value, torch.Tensor):\n raise TypeError(f'Invalid mask type {type(value)}')\n self.set_field(value, 'mask', dtype=torch.Tensor)\n return self\n\n def __repr__(self) -> str:\n \"\"\"Represent the object.\"\"\"\n\n def dump_items(items, prefix=''):\n return '\\n'.join(f'{prefix}{k}: {v}' for k, v in items)\n\n repr_ = ''\n if len(self._metainfo_fields) > 0:\n repr_ += '\\n\\nMETA INFORMATION\\n'\n repr_ += dump_items(self.metainfo_items(), prefix=' ' * 4)\n if len(self._data_fields) > 0:\n repr_ += '\\n\\nDATA FIELDS\\n'\n repr_ += dump_items(self.items(), prefix=' ' * 4)\n\n repr_ = f'<{self.__class__.__name__}({repr_}\\n\\n) at {hex(id(self))}>'\n return repr_" }, { "identifier": "track_on_main_process", "path": "mmpretrain/utils/progress.py", "snippet": "def track_on_main_process(sequence, description='', total=None):\n if not dist.is_main_process() or disable_progress_bar:\n yield from sequence\n else:\n yield from track(sequence, total=total, description=description)" }, { "identifier": "OPENAI_CIFAR100_PROMPT", "path": "mmpretrain/models/multimodal/clip/utils.py", "snippet": "OPENAI_CIFAR100_PROMPT = [\n lambda c: f'a photo of a {c}.',\n lambda c: f'a blurry photo of a {c}.',\n lambda c: f'a black and white photo of a {c}.',\n lambda c: f'a low contrast photo of a {c}.',\n lambda c: f'a high contrast photo of a {c}.',\n lambda c: f'a bad photo of a {c}.',\n lambda c: f'a good photo of a {c}.',\n lambda c: f'a photo of a small {c}.',\n lambda c: f'a photo of a big {c}.',\n lambda c: f'a photo of the {c}.',\n lambda c: f'a blurry photo of the {c}.',\n lambda c: f'a black and white photo of the {c}.',\n lambda c: f'a low contrast photo of the {c}.',\n lambda c: f'a high contrast photo of the {c}.',\n lambda c: f'a bad photo of the {c}.',\n lambda c: f'a good photo of the {c}.',\n lambda c: f'a photo of the small {c}.',\n lambda c: f'a photo of the big {c}.',\n]" }, { "identifier": "OPENAI_IMAGENET_PROMPT", "path": "mmpretrain/models/multimodal/clip/utils.py", "snippet": "OPENAI_IMAGENET_PROMPT = [\n lambda c: f'a bad photo of a {c}.',\n lambda c: f'a photo of many {c}.',\n lambda c: f'a sculpture of a {c}.',\n lambda c: f'a photo of the hard to see {c}.',\n lambda c: f'a low resolution photo of the {c}.',\n lambda c: f'a rendering of a {c}.',\n lambda c: f'graffiti of a {c}.',\n lambda c: f'a bad photo of the {c}.',\n lambda c: f'a cropped photo of the {c}.',\n lambda c: f'a tattoo of a {c}.',\n lambda c: f'the embroidered {c}.',\n lambda c: f'a photo of a hard to see {c}.',\n lambda c: f'a bright photo of a {c}.',\n lambda c: f'a photo of a clean {c}.',\n lambda c: f'a photo of a dirty {c}.',\n lambda c: f'a dark photo of the {c}.',\n lambda c: f'a drawing of a {c}.',\n lambda c: f'a photo of my {c}.',\n lambda c: f'the plastic {c}.',\n lambda c: f'a photo of the cool {c}.',\n lambda c: f'a close-up photo of a {c}.',\n lambda c: f'a black and white photo of the {c}.',\n lambda c: f'a painting of 
the {c}.',\n lambda c: f'a painting of a {c}.',\n lambda c: f'a pixelated photo of the {c}.',\n lambda c: f'a sculpture of the {c}.',\n lambda c: f'a bright photo of the {c}.',\n lambda c: f'a cropped photo of a {c}.',\n lambda c: f'a plastic {c}.',\n lambda c: f'a photo of the dirty {c}.',\n lambda c: f'a jpeg corrupted photo of a {c}.',\n lambda c: f'a blurry photo of the {c}.',\n lambda c: f'a photo of the {c}.',\n lambda c: f'a good photo of the {c}.',\n lambda c: f'a rendering of the {c}.',\n lambda c: f'a {c} in a video game.',\n lambda c: f'a photo of one {c}.',\n lambda c: f'a doodle of a {c}.',\n lambda c: f'a close-up photo of the {c}.',\n lambda c: f'a photo of a {c}.',\n lambda c: f'the origami {c}.',\n lambda c: f'the {c} in a video game.',\n lambda c: f'a sketch of a {c}.',\n lambda c: f'a doodle of the {c}.',\n lambda c: f'a origami {c}.',\n lambda c: f'a low resolution photo of a {c}.',\n lambda c: f'the toy {c}.',\n lambda c: f'a rendition of the {c}.',\n lambda c: f'a photo of the clean {c}.',\n lambda c: f'a photo of a large {c}.',\n lambda c: f'a rendition of a {c}.',\n lambda c: f'a photo of a nice {c}.',\n lambda c: f'a photo of a weird {c}.',\n lambda c: f'a blurry photo of a {c}.',\n lambda c: f'a cartoon {c}.',\n lambda c: f'art of a {c}.',\n lambda c: f'a sketch of the {c}.',\n lambda c: f'a embroidered {c}.',\n lambda c: f'a pixelated photo of a {c}.',\n lambda c: f'itap of the {c}.',\n lambda c: f'a jpeg corrupted photo of the {c}.',\n lambda c: f'a good photo of a {c}.',\n lambda c: f'a plushie {c}.',\n lambda c: f'a photo of the nice {c}.',\n lambda c: f'a photo of the small {c}.',\n lambda c: f'a photo of the weird {c}.',\n lambda c: f'the cartoon {c}.',\n lambda c: f'art of the {c}.',\n lambda c: f'a drawing of the {c}.',\n lambda c: f'a photo of the large {c}.',\n lambda c: f'a black and white photo of a {c}.',\n lambda c: f'the plushie {c}.',\n lambda c: f'a dark photo of a {c}.',\n lambda c: f'itap of a {c}.',\n lambda c: f'graffiti of the {c}.',\n lambda c: f'a toy {c}.',\n lambda c: f'itap of my {c}.',\n lambda c: f'a photo of a cool {c}.',\n lambda c: f'a photo of a small {c}.',\n lambda c: f'a tattoo of the {c}.',\n]" }, { "identifier": "OPENAI_IMAGENET_PROMPT_SUB", "path": "mmpretrain/models/multimodal/clip/utils.py", "snippet": "OPENAI_IMAGENET_PROMPT_SUB = [\n lambda c: f'itap of a {c}.',\n lambda c: f'a bad photo of the {c}.',\n lambda c: f'a origami {c}.',\n lambda c: f'a photo of the large {c}.',\n lambda c: f'a {c} in a video game.',\n lambda c: f'art of the {c}.',\n lambda c: f'a photo of the small {c}.',\n]" } ]
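A tiny standalone illustration (hypothetical helper, not part of the recorded mmpretrain code) of how lambda templates like those in OPENAI_CIFAR100_PROMPT above expand one class name into an ensemble of prompt texts:

# Three templates copied from the CIFAR-100 prompt list above.
templates = [
    lambda c: f'a photo of a {c}.',
    lambda c: f'a blurry photo of a {c}.',
    lambda c: f'a black and white photo of a {c}.',
]

def expand_class(name, templates):
    """Turn one (possibly underscore-separated) class name into prompt texts."""
    name = ' '.join(name.split('_'))   # mirrors the underscore handling applied to CIFAR100_CATEGORIES in clip.py below
    return [template(name) for template in templates]

print(expand_class('maple_tree', templates))
# ['a photo of a maple tree.', 'a blurry photo of a maple tree.', ...]

The model code in this row builds its zero-shot text prototypes from such per-class prompt ensembles (see prepare_text_prototype further down).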
from abc import abstractmethod from typing import List, Optional, Tuple, Union from mmengine.model import BaseModel from torch import nn from mmpretrain.datasets.categories import (CIFAR100_CATEGORIES, IMAGENET_SIMPLE_CATEGORIES) from mmpretrain.registry import MODELS, TOKENIZER from mmpretrain.structures import DataSample from mmpretrain.utils import track_on_main_process from .utils import (OPENAI_CIFAR100_PROMPT, OPENAI_IMAGENET_PROMPT, OPENAI_IMAGENET_PROMPT_SUB) import numpy as np import torch import torch.nn.functional as F
11,382
data_samples: DataSample = None) -> DataSample: raise NotImplementedError def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor: """Returns the tokenized representation of given input string(s) Args: texts (Union[str, List[str]]): An input string or a list of input strings to tokenize context_length (int): The context length to use. Defaults to 52. Returns: torch.Tensor: Resulting tokens. """ if isinstance(texts, str): texts = [texts] all_tokens = [] for text in texts: # adapt the text to Chinese BERT vocab # text = text.lower().replace('“', "\"").replace('”', "\"") # add special tokens all_tokens.append( [self.tokenizer.vocab['<|startoftext|>'] ] + # <|startoftext|>代表[CLS] token self.tokenizer.convert_tokens_to_ids( self.tokenizer.tokenize(text))[:self.context_length - 2] + [self.tokenizer.vocab['<|endoftext|>']]) result = torch.zeros( len(all_tokens), self.context_length, dtype=torch.long) for i, tokens in enumerate(all_tokens): assert len(tokens) <= self.context_length result[i, :len(tokens)] = torch.tensor(tokens) return result @MODELS.register_module() class CLIPZeroShot(CLIP): def __init__( self, vision_backbone: dict, projection: dict, text_backbone: dict, tokenizer: dict, vocab_size: int, transformer_width: int, proj_dim: int, context_length: int = 77, data_preprocessor: Optional[dict] = None, init_cfg: Optional[dict] = None, text_prototype: Union[str, List[str]] = 'imagenet', text_prompt: str = 'vanilla', ): super(CLIPZeroShot, self).__init__(vision_backbone, projection, text_backbone, tokenizer, vocab_size, transformer_width, proj_dim, context_length, data_preprocessor, init_cfg) # for zero-shot classification if isinstance(text_prototype, str) and text_prototype in PROTOTYPE_MAP.keys(): self.prototype = PROTOTYPE_MAP[text_prototype] else: self.prototype = text_prototype self.text_prototype_embeds = None self.prompt = PROMPT_MAP[text_prompt] def predict(self, images: torch.Tensor, data_samples: DataSample = None) -> DataSample: """Predict the classes of the input images. The prediction is for zero-shot classification and the text prototypes will be prepared in thisfunction. Args: images (torch.Tensor): The input images. data_samples (DataSample): The data samples with information from dataset. Returns: DataSample: The results of prediction. """ if self.text_prototype_embeds is None: self.prepare_text_prototype(device=images.device) image_features = self.extract_image_feat(images=images) image_features /= image_features.norm(dim=-1, keepdim=True) # cosine similarity as logits logits_per_image = image_features @ self.text_prototype_embeds.to( image_features.device) * self.logit_scale.exp() pred_scores = F.softmax(logits_per_image, dim=1) pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() out_data_samples = [] if data_samples is None: data_samples = [None for _ in range(pred_scores.size(0))] for data_sample, score, label in zip(data_samples, pred_scores, pred_labels): if data_sample is None: data_sample = DataSample() data_sample.set_pred_score(score).set_pred_label(label) out_data_samples.append(data_sample) return out_data_samples def prepare_text_prototype(self, device) -> None: """The function to prepare text prototypes with prompt.""" class_embeddings = []
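A hedged, standalone sketch of the zero-shot scoring that CLIPZeroShot.predict above performs once text prototypes exist; encode_texts below is a hypothetical stand-in for the model's text pathway, not an mmpretrain API:

import torch
import torch.nn.functional as F

def encode_texts(texts, dim=512):
    """Hypothetical stand-in returning one feature vector per prompt text."""
    return torch.randn(len(texts), dim)

templates = [lambda c: f'a photo of a {c}']              # the 'vanilla' prompt
class_names = ['goldfish', 'tiger shark', 'hen']         # a few IMAGENET_SIMPLE_CATEGORIES

# Prompt-ensemble prototypes: embed each class's prompts, average, renormalize.
prototypes = []
for name in class_names:
    feats = F.normalize(encode_texts([t(name) for t in templates]), dim=-1)
    proto = feats.mean(dim=0)
    prototypes.append(proto / proto.norm())
text_prototype_embeds = torch.stack(prototypes, dim=1)   # [dim, n_classes]

# Scoring mirrors predict(): scaled cosine similarity, softmax, argmax.
image_features = F.normalize(torch.randn(4, 512), dim=-1)          # [N, dim]
logit_scale = torch.tensor(1 / 0.07)   # equals logit_scale.exp() given the np.log(1/0.07) init
logits_per_image = image_features @ text_prototype_embeds * logit_scale
pred_scores = F.softmax(logits_per_image, dim=1)
pred_labels = pred_scores.argmax(dim=1, keepdim=True)

The prototype half of this sketch is an assumption about how prepare_text_prototype continues past the cropped class_embeddings = [] line; only the scoring half is taken directly from the predict method shown above.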
# Copyright (c) OpenMMLab. All rights reserved. CIFAR100_CATEGORIES = [' '.join(c.split('_')) for c in CIFAR100_CATEGORIES] PROTOTYPE_MAP = { 'imagenet': IMAGENET_SIMPLE_CATEGORIES, 'cifar100': CIFAR100_CATEGORIES, } PROMPT_MAP = { 'openai_imagenet': OPENAI_IMAGENET_PROMPT, 'openai_cifar100': OPENAI_CIFAR100_PROMPT, 'vanilla': [lambda c: f'a photo of a {c}'], 'openai_imagenet_sub': OPENAI_IMAGENET_PROMPT_SUB } class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward function.""" orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class CLIP(BaseModel): """The implementation of `CLIP <https://arxiv.org/abs/2103.00020>`_. Args: vision_backbone (dict): Config dict for vision backbone. text_backbone (dict): Config dict for text backbone. tokenizer (dict): Config dict for text tokenizer. proj_dim (int): Projection dimension for similarity computation. text_prototype (str): Text prototype, which can be a key in `PROTOTYPE_MAP` or list of text. text_prompt (str): The prompt for text prototype. Defaults to 'vanilla',which refers to "a photo of {cls}". context_length (int): The context length to use. Defaults to 77. data_preprocessor (Union[dict, nn.Module], optional): The config for preprocessing input data. If None or no specified type, it will use "MultiModalDataPreprocessor" as type. See :class:`MultiModalDataPreprocessor` for more details. Defaults to None. init_cfg (dict, optional): The config to control the initialization. Defaults to None. """ def __init__(self, vision_backbone: dict, projection: dict, text_backbone: dict, tokenizer: dict, vocab_size: int, transformer_width: int, proj_dim: int, context_length: int = 77, data_preprocessor: Optional[dict] = None, init_cfg: Optional[dict] = None): if data_preprocessor is None: data_preprocessor = {} data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') data_preprocessor = MODELS.build(data_preprocessor) super().__init__( data_preprocessor=data_preprocessor, init_cfg=init_cfg) self.context_length = context_length # build the vision transformer self.visual = MODELS.build(vision_backbone) # build the visual projection self.visual_proj = MODELS.build(projection) # build attn_mask for casual-attn text_backbone['attn_mask'] = self.build_attention_mask() # build the text transformer self.transformer = MODELS.build(text_backbone) self.vocab_size = vocab_size self.token_embedding = nn.Embedding(vocab_size, transformer_width) self.positional_embedding = nn.Parameter( torch.empty(self.context_length, transformer_width)) self.ln_final = LayerNorm(transformer_width) self.text_projection = nn.Parameter( torch.empty(transformer_width, proj_dim)) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) self.initialize_parameters() self.tokenizer = TOKENIZER.build(tokenizer) self.tokenizer.vocab = self.tokenizer.get_vocab( ) # CLIPTokenizer has no attribute named 'vocab', so manually def initialize_parameters(self) -> None: """Initialize the parameters. The pretrained weight will override the initialized parameters by this function. 
""" nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.positional_embedding, std=0.01) proj_std = (self.transformer.width**-0.5) * ( (2 * self.transformer.layers)**-0.5) attn_std = self.transformer.width**-0.5 fc_std = (2 * self.transformer.width)**-0.5 for block in self.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std) nn.init.normal_(block.attn.out_proj.weight, std=proj_std) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) if self.text_projection is not None: nn.init.normal_( self.text_projection, std=self.transformer.width**-0.5) def build_attention_mask(self): # lazily create causal attention mask, # with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(self.context_length, self.context_length) mask.fill_(float('-inf')) mask.triu_(1) # zero out the lower diagonal return mask def forward( self, images: torch.Tensor, data_samples: Optional[list] = None, mode: str = 'predict', **kwargs, ): """The unified entry for a forward process in both training and test. The method accepts the following modes: - "predict": Forward and return a list of data samples contain the predict results. Args: images (torch.Tensor): the preprocessed image tensor of shape ``(N, C, H, W)``. data_samples (List[DataSample], optional): The annotation data of every samples. Defaults to None. mode (str): Return what kind of value. Defaults to 'predict'. """ if mode == 'predict': return self.predict(images, data_samples, **kwargs) else: raise RuntimeError(f'Invalid mode "{mode}".') def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor: """The function to extract image latent features.""" return self.visual_proj(self.visual(images))[0] def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor: """The function to extract text latent features.""" x = self.token_embedding(texts) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x)[0] x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # x.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding # (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), texts.argmax(dim=-1)] @ self.text_projection return x def extract_feat( self, images: torch.Tensor, texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]: """The function to extract image and text latent features, the input image or text can not both be None.""" assert images is not None or texts is not None, \ 'text and image cannot both be None!' 
        if images is None:
            return self.extract_text_feat(texts)
        elif texts is None:
            return self.extract_image_feat(images)

        image_features = self.extract_image_feat(images)
        text_features = self.extract_text_feat(texts)

        image_features = image_features / image_features.norm(
            dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(
            dim=-1, keepdim=True)

        return image_features, text_features

    def compute_similarity(self, images, texts):
        """Extract images and texts features and compute cosine similarity."""
        image_features, text_features = self.extract_feat(
            images=images, texts=texts)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape (N, N)
        return logits_per_image, logits_per_text

    @abstractmethod
    def predict(self,
                images: torch.Tensor,
                data_samples: DataSample = None) -> DataSample:
        raise NotImplementedError

    def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor:
        """Returns the tokenized representation of given input string(s).

        Args:
            texts (Union[str, List[str]]): An input string or a list of input
                strings to tokenize.

        Returns:
            torch.Tensor: Resulting tokens, padded and truncated to
                ``self.context_length``.
        """
        if isinstance(texts, str):
            texts = [texts]

        all_tokens = []
        for text in texts:
            # adapt the text to Chinese BERT vocab
            # text = text.lower().replace('“', "\"").replace('”', "\"")

            # add special tokens
            all_tokens.append(
                [self.tokenizer.vocab['<|startoftext|>']
                 ] +  # <|startoftext|> corresponds to the [CLS] token
                self.tokenizer.convert_tokens_to_ids(
                    self.tokenizer.tokenize(text))[:self.context_length - 2] +
                [self.tokenizer.vocab['<|endoftext|>']])

        result = torch.zeros(
            len(all_tokens), self.context_length, dtype=torch.long)

        for i, tokens in enumerate(all_tokens):
            assert len(tokens) <= self.context_length
            result[i, :len(tokens)] = torch.tensor(tokens)

        return result


@MODELS.register_module()
class CLIPZeroShot(CLIP):

    def __init__(
        self,
        vision_backbone: dict,
        projection: dict,
        text_backbone: dict,
        tokenizer: dict,
        vocab_size: int,
        transformer_width: int,
        proj_dim: int,
        context_length: int = 77,
        data_preprocessor: Optional[dict] = None,
        init_cfg: Optional[dict] = None,
        text_prototype: Union[str, List[str]] = 'imagenet',
        text_prompt: str = 'vanilla',
    ):
        super(CLIPZeroShot,
              self).__init__(vision_backbone, projection, text_backbone,
                             tokenizer, vocab_size, transformer_width,
                             proj_dim, context_length, data_preprocessor,
                             init_cfg)

        # for zero-shot classification
        if isinstance(text_prototype,
                      str) and text_prototype in PROTOTYPE_MAP.keys():
            self.prototype = PROTOTYPE_MAP[text_prototype]
        else:
            self.prototype = text_prototype
        self.text_prototype_embeds = None

        self.prompt = PROMPT_MAP[text_prompt]

    def predict(self,
                images: torch.Tensor,
                data_samples: DataSample = None) -> DataSample:
        """Predict the classes of the input images.

        The prediction is for zero-shot classification and the text prototypes
        will be prepared in this function.

        Args:
            images (torch.Tensor): The input images.
            data_samples (DataSample): The data samples with information from
                dataset.

        Returns:
            DataSample: The results of prediction.
""" if self.text_prototype_embeds is None: self.prepare_text_prototype(device=images.device) image_features = self.extract_image_feat(images=images) image_features /= image_features.norm(dim=-1, keepdim=True) # cosine similarity as logits logits_per_image = image_features @ self.text_prototype_embeds.to( image_features.device) * self.logit_scale.exp() pred_scores = F.softmax(logits_per_image, dim=1) pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() out_data_samples = [] if data_samples is None: data_samples = [None for _ in range(pred_scores.size(0))] for data_sample, score, label in zip(data_samples, pred_scores, pred_labels): if data_sample is None: data_sample = DataSample() data_sample.set_pred_score(score).set_pred_label(label) out_data_samples.append(data_sample) return out_data_samples def prepare_text_prototype(self, device) -> None: """The function to prepare text prototypes with prompt.""" class_embeddings = []
for classname in track_on_main_process(self.prototype,
5
2023-12-23 08:36:47+00:00
16k